Example usage for java.util LinkedList getLast

List of usage examples for java.util LinkedList getLast

Introduction

On this page you can find example usages of java.util.LinkedList#getLast().

Prototype

public E getLast() 

Source Link

Document

Returns the last element in this list.

Usage

From source file:uk.ac.diamond.scisoft.analysis.rcp.inspector.InspectionTab.java

@Override
protected void populateCombos() {
    // First combo always offers the CONSTANT entry plus either the dataset's
    // name or a generic DATA placeholder when the dataset has no usable name.
    Combo constantCombo = combos.get(0);
    constantCombo.removeAll();
    constantCombo.add(CONSTANT);
    String datasetName = (dataset == null) ? null : dataset.getName();
    if (datasetName == null || datasetName.length() == 0) {
        constantCombo.add(DATA);
    } else {
        constantCombo.add(datasetName);
    }
    constantCombo.setText(CONSTANT);
    if (paxes != null) {
        paxes.get(0).setName(CONSTANT, false);
    }

    // Anything other than exactly one data axis is handled by the superclass.
    if (daxes != null && daxes.size() != 1) {
        super.populateCombos();
        return;
    }

    int comboCount = combos.size() - comboOffset;
    LinkedList<String> availableAxes = getAllAxisNames();
    // NOTE(review): this dereference NPEs if daxes == null — presumably callers
    // guarantee non-null when not delegating above; confirm.
    int dataAxisCount = daxes.size();

    for (int idx = 0; idx < comboCount; idx++) {
        Combo combo = combos.get(idx + comboOffset);
        combo.removeAll();
        PlotAxisProperty prop = paxes.get(idx + comboOffset);

        // Prefer the axis already selected for this dimension; otherwise fall
        // back to the last still-available axis name.
        String selected;
        if (idx < dataAxisCount) {
            selected = daxes.get(idx).getSelectedName();
            if (!availableAxes.contains(selected)) {
                selected = availableAxes.getLast();
            }
        } else {
            selected = availableAxes.getLast();
        }

        // Repopulate the combo and the property with every remaining axis.
        prop.clear();
        int remaining = availableAxes.size();
        for (int j = 0; j < remaining; j++) {
            String axisName = availableAxes.get(j);
            prop.put(j, axisName);
            combo.add(axisName);
        }
        combo.setText(selected);
        // Each chosen axis is consumed so later combos cannot pick it again.
        availableAxes.remove(selected);
        prop.setName(selected, false);
        prop.setInSet(true);
    }
}

From source file:de.tudarmstadt.ukp.wikipedia.parser.mediawiki.ModularParser.java

/**
 * Builds a (possibly numbered) nested list from the leading run of list-typed
 * lines in {@code lineSpans}. Consumed lines are removed from {@code lineSpans};
 * deeper-nested runs are handled by recursing into this method.
 *
 * @param sm span manager owning the underlying text
 * @param cepp shared parsing state passed through to content-element parsing
 * @param lineSpans remaining line spans; leading lines of {@code listType} are consumed
 * @param listType NESTEDLIST or NESTEDLIST_NR (numbered)
 * @return the populated list container
 */
private NestedListContainer buildNestedList(SpanManager sm, ContentElementParsingParameters cepp,
        LinkedList<Span> lineSpans, lineType listType) {

    boolean numbered = listType == lineType.NESTEDLIST_NR;
    NestedListContainer result = new NestedListContainer(numbered);

    if (calculateSrcSpans) {
        // End position is not known yet; it is patched below once the list's
        // lines have been collected (-1 is the placeholder).
        result.setSrcSpan(new SrcSpan(sm.getSrcPos(lineSpans.getFirst().getStart()), -1));
    }

    // Collect the leading run of lines that still have this list type,
    // stripping the first marker character ('*' or '#') from each line.
    LinkedList<Span> nestedListSpans = new LinkedList<Span>();
    while (!lineSpans.isEmpty()) {
        Span s = lineSpans.getFirst();
        if (listType != getLineType(sm, s)) {
            break;
        }
        nestedListSpans.add(new Span(s.getStart() + 1, s.getEnd()).trim(sm));
        lineSpans.removeFirst();
    }
    sm.manageList(nestedListSpans);

    if (calculateSrcSpans) {
        // NOTE(review): assumes at least one line matched listType, otherwise
        // getLast() throws NoSuchElementException — confirm callers guarantee this.
        result.getSrcSpan().setEnd(sm.getSrcPos(nestedListSpans.getLast().getEnd()));
    }

    // After stripping one marker, lines that are still list-typed belong to a
    // deeper nesting level and recurse; plain lines become list elements.
    while (!nestedListSpans.isEmpty()) {
        Span s = nestedListSpans.getFirst();
        lineType t = getLineType(sm, s);
        if (t == lineType.NESTEDLIST || t == lineType.NESTEDLIST_NR) {
            result.add(buildNestedList(sm, cepp, nestedListSpans, t));
        } else {
            nestedListSpans.removeFirst();
            result.add((NestedListElement) parseContentElement(sm, cepp, s, new NestedListElement()));
        }
    }

    sm.removeManagedList(nestedListSpans);

    return result;
}

From source file:de.tudarmstadt.ukp.wikipedia.parser.mediawiki.ModularParser.java

/**
 * Splits the managed text into one trailing-trimmed Span per line, appends
 * them to {@code lineSpans}, and drops empty spans from both ends of the list.
 * The list is registered with the span manager so later edits keep it valid.
 */
private void getLineSpans(SpanManager sm, LinkedList<Span> lineSpans) {
    sm.manageList(lineSpans);

    int lineStart = 0;
    for (int sep = sm.indexOf(lineSeparator, lineStart); sep != -1; sep = sm.indexOf(lineSeparator, lineStart)) {
        lineSpans.add(new Span(lineStart, sep).trimTrail(sm));
        lineStart = sep + lineSeparator.length();
    }
    // Remainder after the last separator (or the whole text if there was none).
    lineSpans.add(new Span(lineStart, sm.length()).trimTrail(sm));

    // Strip empty lines from the front...
    while (!lineSpans.isEmpty() && lineSpans.getFirst().length() == 0) {
        lineSpans.removeFirst();
    }
    // ...and from the back.
    while (!lineSpans.isEmpty() && lineSpans.getLast().length() == 0) {
        lineSpans.removeLast();
    }
}

From source file:com.hipu.bdb.util.FileUtils.java

/**
 * Retrieve a number of lines from the file around the given 
 * position, as when paging forward or backward through a file. 
 * 
 * @param file File to retrieve lines
 * @param position offset to anchor lines
 * @param signedDesiredLineCount lines requested; if negative, 
 *        want this number of lines ending with a line containing
 *        the position; if positive, want this number of lines,
 *        all starting at or after position. 
 * @param lines List<String> to insert found lines
 * @param lineEstimate int estimate of line size, 0 means use default
 *        of 128
 * @return LongRange indicating the file offsets corresponding to 
 *         the beginning of the first line returned, and the point
 *         after the end of the last line returned
 * @throws IOException
 */
@SuppressWarnings("unchecked")
public static LongRange pagedLines(File file, long position, int signedDesiredLineCount, List<String> lines,
        int lineEstimate) throws IOException {
    // consider negative positions as from end of file; -1 = last byte
    if (position < 0) {
        position = file.length() + position;
    }

    // calculate a reasonably sized chunk likely to have all desired lines
    if (lineEstimate == 0) {
        lineEstimate = 128;
    }
    int desiredLineCount = Math.abs(signedDesiredLineCount);
    long startPosition;
    long fileEnd = file.length();
    int bufferSize = (desiredLineCount + 5) * lineEstimate;
    if (signedDesiredLineCount > 0) {
        // reading forward; include previous char in case line-end
        startPosition = position - 1;
    } else {
        // reading backward
        startPosition = position - bufferSize + (2 * lineEstimate);
    }
    if (startPosition < 0) {
        startPosition = 0;
    }
    if (startPosition + bufferSize > fileEnd) {
        bufferSize = (int) (fileEnd - startPosition);
    }

    // read that reasonable chunk
    // BUGFIX: the buffer was previously allocated but never filled before the
    // stream was closed, so the line scan below always saw all-zero bytes.
    // Read fully here, and close the stream even if the read throws.
    byte[] buf = new byte[bufferSize];
    FileInputStream fis = new FileInputStream(file);
    try {
        fis.getChannel().position(startPosition);
        int filled = 0;
        while (filled < bufferSize) {
            int r = fis.read(buf, filled, bufferSize - filled);
            if (r < 0) {
                break; // EOF before the buffer filled (file shrank?); scan what we have
            }
            filled += r;
        }
    } finally {
        IOUtils.closeQuietly(fis);
    }

    // find all line starts fully in buffer
    // (positions after a line-end, per line-end definition in 
    // BufferedReader.readLine)
    LinkedList<Integer> lineStarts = new LinkedList<Integer>();
    if (startPosition == 0) {
        lineStarts.add(0);
    }
    boolean atLineEnd = false;
    boolean eatLF = false;
    int i;
    for (i = 0; i < bufferSize; i++) {
        // swallow the LF of a CRLF pair so it is not counted as a second line-end
        if ((char) buf[i] == '\n' && eatLF) {
            eatLF = false;
            continue;
        }
        if (atLineEnd) {
            atLineEnd = false;
            lineStarts.add(i);
            if (signedDesiredLineCount < 0 && startPosition + i > position) {
                // reached next line past position, read no more
                break;
            }
        }
        if ((char) buf[i] == '\r') {
            atLineEnd = true;
            eatLF = true;
            continue;
        }
        if ((char) buf[i] == '\n') {
            atLineEnd = true;
        }
    }
    if (startPosition + i == fileEnd) {
        // add phantom lineStart after end
        lineStarts.add(bufferSize);
    }
    int foundFullLines = lineStarts.size() - 1;

    // if found no lines
    if (foundFullLines < 1) {
        if (signedDesiredLineCount > 0) {
            if (startPosition + bufferSize == fileEnd) {
                // nothing more to read: return nothing
                return new LongRange(fileEnd, fileEnd);
            } else {
                // retry with larger lineEstimate
                return pagedLines(file, position, signedDesiredLineCount, lines,
                        Math.max(bufferSize, lineEstimate));
            }

        } else {
            // try again with much larger line estimate
            // TODO: fail gracefully before growing to multi-MB buffers
            return pagedLines(file, position, signedDesiredLineCount, lines, bufferSize);
        }
    }

    // trim unneeded lines
    while (signedDesiredLineCount > 0 && startPosition + lineStarts.getFirst() < position) {
        // discard lines starting before desired position
        lineStarts.removeFirst();
    }
    while (lineStarts.size() > desiredLineCount + 1) {
        if (signedDesiredLineCount < 0 && (startPosition + lineStarts.get(1) <= position)) {
            // discard from front until reach line containing target position
            lineStarts.removeFirst();
        } else {
            lineStarts.removeLast();
        }
    }
    int firstLine = lineStarts.getFirst();
    int partialLine = lineStarts.getLast();
    LongRange range = new LongRange(startPosition + firstLine, startPosition + partialLine);
    List<String> foundLines = IOUtils
            .readLines(new ByteArrayInputStream(buf, firstLine, partialLine - firstLine));

    if (foundFullLines < desiredLineCount && signedDesiredLineCount < 0 && startPosition > 0) {
        // if needed and reading backward, read more lines from earlier
        range = expandRange(range, pagedLines(file, range.getMinimumLong() - 1,
                signedDesiredLineCount + foundFullLines, lines, bufferSize / foundFullLines));

    }

    lines.addAll(foundLines);

    if (signedDesiredLineCount < 0 && range.getMaximumLong() < position) {
        // did not get line containing start position
        // NOTE(review): partialLine is a buffer-relative offset; this looks like
        // it should be startPosition + partialLine — confirm against upstream
        // (org.archive.util.FileUtils has the same call) before changing.
        range = expandRange(range, pagedLines(file, partialLine, 1, lines, bufferSize / foundFullLines));
    }

    if (signedDesiredLineCount > 0 && foundFullLines < desiredLineCount && range.getMaximumLong() < fileEnd) {
        // need more forward lines
        range = expandRange(range, pagedLines(file, range.getMaximumLong(), desiredLineCount - foundFullLines,
                lines, bufferSize / foundFullLines));
    }

    return range;
}

From source file:org.archive.util.FileUtils.java

/**
 * Retrieve a number of lines from the file around the given 
 * position, as when paging forward or backward through a file. 
 * 
 * @param file File to retrieve lines
 * @param position offset to anchor lines
 * @param signedDesiredLineCount lines requested; if negative, 
 *        want this number of lines ending with a line containing
 *        the position; if positive, want this number of lines,
 *        all starting at or after position. 
 * @param lines List<String> to insert found lines
 * @param lineEstimate int estimate of line size, 0 means use default
 *        of 128
 * @return LongRange indicating the file offsets corresponding to 
 *         the beginning of the first line returned, and the point
 *         after the end of the last line returned
 * @throws IOException
 */
@SuppressWarnings("unchecked")
public static LongRange pagedLines(File file, long position, int signedDesiredLineCount, List<String> lines,
        int lineEstimate) throws IOException {
    // consider negative positions as from end of file; -1 = last byte
    if (position < 0) {
        position = file.length() + position;
    }

    // calculate a reasonably sized chunk likely to have all desired lines
    if (lineEstimate == 0) {
        lineEstimate = 128;
    }
    int desiredLineCount = Math.abs(signedDesiredLineCount);
    long startPosition;
    long fileEnd = file.length();
    int bufferSize = (desiredLineCount + 5) * lineEstimate;
    if (signedDesiredLineCount > 0) {
        // reading forward; include previous char in case line-end
        startPosition = position - 1;
    } else {
        // reading backward
        startPosition = position - bufferSize + (2 * lineEstimate);
    }
    if (startPosition < 0) {
        startPosition = 0;
    }
    if (startPosition + bufferSize > fileEnd) {
        bufferSize = (int) (fileEnd - startPosition);
    }

    // read that reasonable chunk
    // NOTE(review): if readFully throws, fis is never closed — consider
    // moving closeQuietly into a finally block.
    FileInputStream fis = new FileInputStream(file);
    fis.getChannel().position(startPosition);
    byte[] buf = new byte[bufferSize];
    ArchiveUtils.readFully(fis, buf);
    IOUtils.closeQuietly(fis);

    // find all line starts fully in buffer
    // (positions after a line-end, per line-end definition in 
    // BufferedReader.readLine)
    LinkedList<Integer> lineStarts = new LinkedList<Integer>();
    if (startPosition == 0) {
        lineStarts.add(0);
    }
    boolean atLineEnd = false;
    boolean eatLF = false;
    int i;
    for (i = 0; i < bufferSize; i++) {
        // swallow the LF of a CRLF pair so it is not counted as a second line-end
        if ((char) buf[i] == '\n' && eatLF) {
            eatLF = false;
            continue;
        }
        if (atLineEnd) {
            atLineEnd = false;
            lineStarts.add(i);
            if (signedDesiredLineCount < 0 && startPosition + i > position) {
                // reached next line past position, read no more
                break;
            }
        }
        if ((char) buf[i] == '\r') {
            atLineEnd = true;
            eatLF = true;
            continue;
        }
        if ((char) buf[i] == '\n') {
            atLineEnd = true;
        }
    }
    if (startPosition + i == fileEnd) {
        // add phantom lineStart after end
        lineStarts.add(bufferSize);
    }
    // a "full" line is one bounded by two known line starts
    int foundFullLines = lineStarts.size() - 1;

    // if found no lines
    if (foundFullLines < 1) {
        if (signedDesiredLineCount > 0) {
            if (startPosition + bufferSize == fileEnd) {
                // nothing more to read: return nothing
                return new LongRange(fileEnd, fileEnd);
            } else {
                // retry with larger lineEstimate
                return pagedLines(file, position, signedDesiredLineCount, lines,
                        Math.max(bufferSize, lineEstimate));
            }

        } else {
            // try again with much larger line estimate
            // TODO: fail gracefully before growing to multi-MB buffers
            return pagedLines(file, position, signedDesiredLineCount, lines, bufferSize);
        }
    }

    // trim unneeded lines
    while (signedDesiredLineCount > 0 && startPosition + lineStarts.getFirst() < position) {
        // discard lines starting before desired position
        lineStarts.removeFirst();
    }
    while (lineStarts.size() > desiredLineCount + 1) {
        if (signedDesiredLineCount < 0 && (startPosition + lineStarts.get(1) <= position)) {
            // discard from front until reach line containing target position
            lineStarts.removeFirst();
        } else {
            lineStarts.removeLast();
        }
    }
    // first retained line start, and the start of the trailing partial line
    int firstLine = lineStarts.getFirst();
    int partialLine = lineStarts.getLast();
    LongRange range = new LongRange(startPosition + firstLine, startPosition + partialLine);
    List<String> foundLines = IOUtils
            .readLines(new ByteArrayInputStream(buf, firstLine, partialLine - firstLine));

    if (foundFullLines < desiredLineCount && signedDesiredLineCount < 0 && startPosition > 0) {
        // if needed and reading backward, read more lines from earlier
        range = expandRange(range, pagedLines(file, range.getMinimumLong() - 1,
                signedDesiredLineCount + foundFullLines, lines, bufferSize / foundFullLines));

    }

    lines.addAll(foundLines);

    if (signedDesiredLineCount < 0 && range.getMaximumLong() < position) {
        // did not get line containing start position
        // NOTE(review): partialLine is a buffer-relative offset; looks like it
        // should be startPosition + partialLine — confirm before changing.
        range = expandRange(range, pagedLines(file, partialLine, 1, lines, bufferSize / foundFullLines));
    }

    if (signedDesiredLineCount > 0 && foundFullLines < desiredLineCount && range.getMaximumLong() < fileEnd) {
        // need more forward lines
        range = expandRange(range, pagedLines(file, range.getMaximumLong(), desiredLineCount - foundFullLines,
                lines, bufferSize / foundFullLines));
    }

    return range;
}

From source file:de.tudarmstadt.ukp.wikipedia.parser.mediawiki.ModularParser.java

/**
 * Builds a ContentElement; this function is called by all the other
 * parseContentElement(..) overloads.
 *
 * Parses formatting (bold/italic), external links, and then attaches any
 * previously collected links, templates, tags, nowiki and math spans that
 * fall inside this element's range. Spans are kept registered with the
 * SpanManager while the text is being edited, and only un-managed (and
 * re-based to element-relative offsets) at the very end.
 *
 * @param sm span manager owning the page text
 * @param cepp previously collected page-level parsing state (links, templates, ...)
 * @param lineSpans the lines making up this element; consumed by this method
 * @param result the element to populate and return
 */
private ContentElement parseContentElement(SpanManager sm, ContentElementParsingParameters cepp,
        LinkedList<Span> lineSpans, ContentElement result) {

    List<Link> localLinks = new ArrayList<Link>();
    List<Template> localTemplates = new ArrayList<Template>();

    List<Span> boldSpans = new ArrayList<Span>();
    List<Span> italicSpans = new ArrayList<Span>();
    sm.manageList(boldSpans);
    sm.manageList(italicSpans);

    List<Span> managedSpans = new ArrayList<Span>();
    sm.manageList(managedSpans);

    // Overall range of this element: from the first line's start to the last line's end.
    // NOTE(review): assumes lineSpans is non-empty — confirm callers guarantee this.
    Span contentElementRange = new Span(lineSpans.getFirst().getStart(), lineSpans.getLast().getEnd()).trim(sm);
    managedSpans.add(contentElementRange);

    // set the SrcSpan
    if (calculateSrcSpans) {
        result.setSrcSpan(new SrcSpan(sm.getSrcPos(contentElementRange.getStart()),
                sm.getSrcPos(contentElementRange.getEnd())));
    }

    sm.manageList(lineSpans);
    while (!lineSpans.isEmpty()) {
        Span line = lineSpans.getFirst();

        parseBoldAndItalicSpans(sm, line, boldSpans, italicSpans);

        // External links
        parseExternalLinks(sm, line, "http://", managedSpans, localLinks, result);
        parseExternalLinks(sm, line, "https://", managedSpans, localLinks, result);
        parseExternalLinks(sm, line, "ftp://", managedSpans, localLinks, result);
        parseExternalLinks(sm, line, "mailto:", managedSpans, localLinks, result);

        // end of line-wise operations
        lineSpans.removeFirst();
    }
    sm.removeManagedList(lineSpans);

    // Links: claim every page-level link span that falls inside this element.
    // Removal from cepp shifts later entries left, hence i only advances on no-hit.
    int i;
    i = 0;
    while (i < cepp.linkSpans.size()) {
        if (contentElementRange.hits(cepp.linkSpans.get(i))) {
            Span linkSpan = cepp.linkSpans.remove(i);
            managedSpans.add(linkSpan);
            // cepp.links is kept parallel to cepp.linkSpans, so the same index applies.
            Link l = cepp.links.remove(i).setHomeElement(result);
            localLinks.add(l);
            if (!showImageText && l.getType() == Link.type.IMAGE) {
                // deletes the Image Text from the ContentElement Text.
                sm.delete(linkSpan);
            }
        } else {
            i++;
        }
    }

    // Templates: apply post-parse replacements and collect parsed objects.
    i = 0;
    while (i < cepp.templateSpans.size()) {
        Span ts = cepp.templateSpans.get(i);
        if (contentElementRange.hits(ts)) {
            ResolvedTemplate rt = cepp.templates.remove(i);

            if (rt.getPostParseReplacement() != null) {
                sm.replace(ts, rt.getPostParseReplacement());
            }
            cepp.templateSpans.remove(i);

            Object parsedObject = rt.getParsedObject();
            if (parsedObject != null) {
                managedSpans.add(ts);

                // A template may have resolved into a Template, a Link, or
                // something else (fall back to the raw template).
                Class parsedObjectClass = parsedObject.getClass();
                if (parsedObjectClass == Template.class) {
                    localTemplates.add((Template) parsedObject);
                } else if (parsedObjectClass == Link.class) {
                    localLinks.add(((Link) parsedObject).setHomeElement(result));
                } else {
                    localTemplates.add(rt.getTemplate());
                }
            }
        } else {
            i++;
        }
    }

    // HTML/XML Tags: either delete them from the text or keep them as format spans.
    i = 0;
    List<Span> tags = new ArrayList<Span>();
    while (i < cepp.tagSpans.size()) {
        Span s = cepp.tagSpans.get(i);
        if (contentElementRange.hits(s)) {
            cepp.tagSpans.remove(i);
            if (deleteTags) {
                sm.delete(s);
            } else {
                tags.add(s);
                managedSpans.add(s);
            }
        } else {
            i++;
        }
    }

    // noWiki: restore the verbatim text that was cut out earlier.
    i = 0;
    List<Span> localNoWikiSpans = new ArrayList<Span>();
    while (i < cepp.noWikiSpans.size()) {
        Span s = cepp.noWikiSpans.get(i);
        if (contentElementRange.hits(s)) {
            cepp.noWikiSpans.remove(i);
            sm.replace(s, cepp.noWikiStrings.remove(i));
            localNoWikiSpans.add(s);
            managedSpans.add(s);
        } else {
            i++;
        }
    }

    // MATH Tags: either restore the tag content or delete it entirely.
    i = 0;
    List<Span> mathSpans = new ArrayList<Span>();
    while (i < cepp.mathSpans.size()) {
        Span s = cepp.mathSpans.get(i);
        if (contentElementRange.hits(s)) {
            cepp.mathSpans.remove(i);

            if (showMathTagContent) {
                mathSpans.add(s);
                managedSpans.add(s);
                sm.replace(s, cepp.mathStrings.remove(i));
            } else {
                sm.delete(s);
            }
        } else {
            i++;
        }
    }

    result.setText(sm.substring(contentElementRange));

    // managed spans must be removed here and not earlier, because every
    // change in the SpanManager affects the Spans!
    sm.removeManagedList(boldSpans);
    sm.removeManagedList(italicSpans);
    sm.removeManagedList(managedSpans);

    // contentElementRange is itself still contained in managedSpans; therefore
    // rebase every span to offsets relative to the element's start:
    final int adjust = -contentElementRange.getStart();
    for (Span s : boldSpans) {
        s.adjust(adjust);
    }
    for (Span s : italicSpans) {
        s.adjust(adjust);
    }
    for (Span s : managedSpans) {
        s.adjust(adjust);
    }

    result.setFormatSpans(FormatType.BOLD, boldSpans);
    result.setFormatSpans(FormatType.ITALIC, italicSpans);
    result.setFormatSpans(FormatType.TAG, tags);
    result.setFormatSpans(FormatType.MATH, mathSpans);
    result.setFormatSpans(FormatType.NOWIKI, localNoWikiSpans);

    result.setLinks(sortLinks(localLinks));
    result.setTemplates(sortTemplates(localTemplates));

    return result;
}

From source file:net.netheos.pcsapi.providers.googledrive.GoogleDrive.java

/**
 * Resolve the given CPath to gather informations (mainly id and mimeType) ; returns a RemotePath object.
 *
 * Drive API does not allow this natively ; we perform a single request that returns all files (but may return too
 * much) : find files with title='a' or title='b' or title='c', then we connect children and parents to get the
 * chain of ids. TODO This fails if there are several folders with same name, and we follow the wrong branch
 *
 * @param path the remote path to resolve
 * @param detailed if true, also request download/modification/size fields
 * @return a RemotePath holding one JSONObject per resolved path segment
 *         (the chain may be shorter than the path if a segment was not found)
 */
private RemotePath findRemotePath(CPath path, boolean detailed) {
    // easy special case :
    if (path.isRoot()) {
        return new RemotePath(path, new LinkedList<JSONObject>());
    }
    // Here we know that we have at least one path segment

    // Build query (cf. https://developers.google.com/drive/web/search-parameters)
    List<String> segments = path.split();
    StringBuilder query = new StringBuilder("(");
    int i = 0;
    for (String segment : segments) {
        if (i > 0) {
            query.append(" or ");
        }
        query.append("(title='").append(segment.replace("'", "\\'")) // escape ' --> \'
                .append("'");
        // for all but last segment, we enforce file to be a directory
        // TODO this creates looong query string, is that interesting ?
        //if (i < segments.size()-1) {
        //   q.append(" and mimeType='").append(MIME_TYPE_DIRECTORY).append("'");
        query.append(")");
        i++;
    }
    query.append(") and trashed = false");

    // drive may not return all results in a single query :
    // FIXME ouch there seems to be some issues with pagination on the google side ?
    // http://stackoverflow.com/questions/18646004/drive-api-files-list-query-with-not-parameter-returns-empty-pages?rq=1
    // http://stackoverflow.com/questions/18355113/paging-in-files-list-returns-endless-number-of-empty-pages?rq=1
    // http://stackoverflow.com/questions/19679190/is-paging-broken-in-drive?rq=1
    // http://stackoverflow.com/questions/16186264/files-list-reproducibly-returns-incomplete-list-in-drive-files-scope
    List<JSONObject> items = new ArrayList<JSONObject>(segments.size());
    String nextPageToken = null;
    while (true) {
        // Execute request ; we ask for specific fields only
        String fieldsFilter = "id,title,mimeType,parents/id,parents/isRoot";
        if (detailed) {
            fieldsFilter += ",downloadUrl,modifiedDate,fileSize";
        }
        fieldsFilter = "nextPageToken,items(" + fieldsFilter + ")";
        URIBuilder builder = new URIBuilder(URI.create(FILES_ENDPOINT));
        builder.addParameter("q", query.toString());
        builder.addParameter("fields", fieldsFilter);
        if (nextPageToken != null) {
            builder.addParameter("pageToken", nextPageToken);
        }
        builder.addParameter("maxResults", "1000");

        HttpGet request = new HttpGet(builder.build());
        RequestInvoker<CResponse> ri = getApiRequestInvoker(request, null);
        JSONObject jresp = retryStrategy.invokeRetry(ri).asJSONObject();
        JSONArray itemsInPage = jresp.getJSONArray("items");
        for (i = 0; i < itemsInPage.length(); i++) {
            items.add(itemsInPage.getJSONObject(i));
        }
        // Is it the last page ?
        nextPageToken = jresp.optString("nextPageToken", null);
        if (nextPageToken != null) {
            LOGGER.debug("findRemotePath() will loop : ({} items in this page)", itemsInPage.length());
        } else {
            LOGGER.debug("findRemotePath() : no more data for this query");
            break;
        }
    }

    // Now connect parent/children to build the path :
    LinkedList<JSONObject> filesChain = new LinkedList<JSONObject>();
    i = 0;
    for (String searchedSegment : segments) {
        boolean firstSegment = (i == 0); // this changes parent condition (isRoot, or no parent for shares)
        //            boolean lastSegment = ( i == segments.size() - 1 );
        i++;
        //print("searching segment ",searched_segment)
        JSONObject nextItem = null;
        for (JSONObject item : items) {
            //print("examaning item=",item)
            // We match title
            // FIXME and enforce type is directory if not last segment :
            if (item.getString("title").equals(searchedSegment)) {
                // && (last_segment or item['mimeType'] == self.MIME_TYPE_DIRECTORY)):
                JSONArray parents = item.optJSONArray("parents");
                if (firstSegment) {
                    if (parents == null || parents.length() == 0) { // no parents (shared folder ?)
                        nextItem = item;
                        break;
                    }
                    for (int k = 0; k < parents.length(); k++) {
                        JSONObject p = parents.getJSONObject(k);
                        if (p.getBoolean("isRoot")) { // at least one parent is root
                            nextItem = item;
                            break;
                        }
                    }
                } else {
                    // BUGFIX: optJSONArray may return null (item with no parents,
                    // e.g. a shared item); previously this dereferenced parents
                    // unconditionally and threw a NullPointerException. A
                    // parent-less item can never match a non-first segment.
                    if (parents != null) {
                        for (int k = 0; k < parents.length(); k++) {
                            JSONObject p = parents.getJSONObject(k);
                            if (p.getString("id").equals(filesChain.getLast().getString("id"))) {
                                //at least one parent id is last parent id
                                nextItem = item;
                                break;
                            }
                        }
                    }
                }
                if (nextItem != null) {
                    break;
                }
            }
        }
        if (nextItem == null) {
            break;
        }
        filesChain.add(nextItem);
    }
    return new RemotePath(path, filesChain);
}

From source file:org.dbpedia.spotlight.mediawiki.ModularParser.java

/**
 * Building a ContentElement; this function is called by all the other
 * parseContentElement(..) overloads.
 *
 * DBpedia Spotlight variant: identical to the upstream JWPL parser except
 * that template processing is disabled (see the commented-out block below),
 * which strips boilerplate from the wikitext.
 *
 * @param sm span manager owning the page text
 * @param cepp previously collected page-level parsing state (links, templates, ...)
 * @param lineSpans the lines making up this element; consumed by this method
 * @param result the element to populate and return
 */
private ContentElement parseContentElement(SpanManager sm, ContentElementParsingParameters cepp,
        LinkedList<Span> lineSpans, ContentElement result) {

    List<Link> localLinks = new ArrayList<Link>();
    List<Template> localTemplates = new ArrayList<Template>();

    List<Span> boldSpans = new ArrayList<Span>();
    List<Span> italicSpans = new ArrayList<Span>();
    sm.manageList(boldSpans);
    sm.manageList(italicSpans);

    List<Span> managedSpans = new ArrayList<Span>();
    sm.manageList(managedSpans);

    // Overall range of this element: from the first line's start to the last line's end.
    // NOTE(review): assumes lineSpans is non-empty — confirm callers guarantee this.
    Span contentElementRange = new Span(lineSpans.getFirst().getStart(), lineSpans.getLast().getEnd()).trim(sm);
    managedSpans.add(contentElementRange);

    // set the SrcSpan
    if (calculateSrcSpans) {
        result.setSrcSpan(new SrcSpan(sm.getSrcPos(contentElementRange.getStart()),
                sm.getSrcPos(contentElementRange.getEnd())));
    }

    sm.manageList(lineSpans);
    while (!lineSpans.isEmpty()) {
        Span line = lineSpans.getFirst();

        parseBoldAndItalicSpans(sm, line, boldSpans, italicSpans);

        // External links
        parseExternalLinks(sm, line, "http://", managedSpans, localLinks, result);
        parseExternalLinks(sm, line, "https://", managedSpans, localLinks, result);
        parseExternalLinks(sm, line, "ftp://", managedSpans, localLinks, result);
        parseExternalLinks(sm, line, "mailto:", managedSpans, localLinks, result);

        // end of line-wise operations
        lineSpans.removeFirst();
    }
    sm.removeManagedList(lineSpans);

    // Links: claim every page-level link span inside this element's range.
    // Removal from cepp shifts later entries left, hence i only advances on no-hit.
    int i;
    i = 0;
    while (i < cepp.linkSpans.size()) {
        if (contentElementRange.hits(cepp.linkSpans.get(i))) {
            Span linkSpan = cepp.linkSpans.remove(i);
            managedSpans.add(linkSpan);
            // cepp.links is kept parallel to cepp.linkSpans, so the same index applies.
            Link l = cepp.links.remove(i).setHomeElement(result);
            localLinks.add(l);
            if (!showImageText && l.getType() == Link.type.IMAGE) {
                // deletes the Image Text from the ContentElement Text.
                sm.delete(linkSpan);
            }
        } else {
            i++;
        }
    }

    // Templates
    //DBPedia - Spotlight. Removing the boiler plate logic from the wikitext
    //Commenting the Templates Logic
    /*      i = 0;
          while (i < cepp.templateSpans.size())
          {
             Span ts = cepp.templateSpans.get(i);
             if (contentElementRange.hits(ts))
             {
    ResolvedTemplate rt = cepp.templates.remove(i);
            
    if (rt.getPostParseReplacement() != null)
    {
       sm.replace(ts, rt.getPostParseReplacement());
    }
    cepp.templateSpans.remove(i);
            
    Object parsedObject = rt.getParsedObject();
    if (parsedObject != null)
    {
       managedSpans.add(ts);
            
       Class parsedObjectClass = parsedObject.getClass();
       if (parsedObjectClass == Template.class)
       {
          localTemplates.add((Template) parsedObject);
       }
       else if (parsedObjectClass == Link.class)
       {
          localLinks.add(((Link) parsedObject)
                .setHomeElement(result));
       }
       else
       {
          localTemplates.add(rt.getTemplate());
       }
    }
             }
             else
             {
    i++;
             }
          }
    */
    // HTML/XML Tags: either delete them from the text or keep them as format spans.
    i = 0;
    List<Span> tags = new ArrayList<Span>();
    while (i < cepp.tagSpans.size()) {
        Span s = cepp.tagSpans.get(i);
        if (contentElementRange.hits(s)) {
            cepp.tagSpans.remove(i);
            if (deleteTags) {
                sm.delete(s);
            } else {
                tags.add(s);
                managedSpans.add(s);
            }
        } else {
            i++;
        }
    }

    // noWiki: restore the verbatim text that was cut out earlier.
    i = 0;
    List<Span> localNoWikiSpans = new ArrayList<Span>();
    while (i < cepp.noWikiSpans.size()) {
        Span s = cepp.noWikiSpans.get(i);
        if (contentElementRange.hits(s)) {
            cepp.noWikiSpans.remove(i);
            sm.replace(s, cepp.noWikiStrings.remove(i));
            localNoWikiSpans.add(s);
            managedSpans.add(s);
        } else {
            i++;
        }
    }

    // MATH Tags: either restore the tag content or delete it entirely.
    i = 0;
    List<Span> mathSpans = new ArrayList<Span>();
    while (i < cepp.mathSpans.size()) {
        Span s = cepp.mathSpans.get(i);
        if (contentElementRange.hits(s)) {
            cepp.mathSpans.remove(i);

            if (showMathTagContent) {
                mathSpans.add(s);
                managedSpans.add(s);
                sm.replace(s, cepp.mathStrings.remove(i));
            } else {
                sm.delete(s);
            }
        } else {
            i++;
        }
    }

    result.setText(sm.substring(contentElementRange));

    // managed spans must be removed here and not earlier, because every
    // change in the SpanManager affects the Spans!
    sm.removeManagedList(boldSpans);
    sm.removeManagedList(italicSpans);
    sm.removeManagedList(managedSpans);

    // contentElementRange is itself still contained in managedSpans; therefore
    // rebase every span to offsets relative to the element's start:
    final int adjust = -contentElementRange.getStart();
    for (Span s : boldSpans) {
        s.adjust(adjust);
    }
    for (Span s : italicSpans) {
        s.adjust(adjust);
    }
    for (Span s : managedSpans) {
        s.adjust(adjust);
    }

    result.setFormatSpans(FormatType.BOLD, boldSpans);
    result.setFormatSpans(FormatType.ITALIC, italicSpans);
    result.setFormatSpans(FormatType.TAG, tags);
    result.setFormatSpans(FormatType.MATH, mathSpans);
    result.setFormatSpans(FormatType.NOWIKI, localNoWikiSpans);

    result.setLinks(sortLinks(localLinks));
    result.setTemplates(sortTemplates(localTemplates));

    return result;
}

From source file:org.epics.archiverappliance.retrieval.DataRetrievalServlet.java

/**
 * Services a data-retrieval request for a single PV.
 *
 * <p>Request parameters (all read from {@code req}):
 * <ul>
 *   <li>{@code pv} - the PV name (required; a trailing {@code .VAL} is stripped,
 *       and a function-call syntax {@code pp(pvName,args...)} selects a post processor)</li>
 *   <li>{@code from}/{@code to} - ISO 8601 time range; defaults to the last day ending one hour from now</li>
 *   <li>{@code timeranges} - optional list of start/end pairs overriding from/to</li>
 *   <li>{@code usereduced}, {@code donotchunk}, {@code fetchLatestMetadata}, {@code pp},
 *       {@code retiredPVTemplate} - optional behavior switches</li>
 * </ul>
 *
 * <p>The method resolves the PV's type info (possibly from an external appliance or a
 * retired-PV template), proxies the request if the data lives on another appliance,
 * then streams merged/deduped event streams from all data sources into the response.
 *
 * @param req  the servlet request carrying the parameters above
 * @param resp the servlet response; data is streamed (chunked by default)
 * @throws ServletException on servlet-level failures
 * @throws IOException      on I/O failures writing the response
 */
private void doGetSinglePV(HttpServletRequest req, HttpServletResponse resp)
        throws ServletException, IOException {

    PoorMansProfiler pmansProfiler = new PoorMansProfiler();
    String pvName = req.getParameter("pv");

    // Refuse requests until the appliance has fully started; data sources may not be wired up yet.
    if (configService.getStartupState() != STARTUP_SEQUENCE.STARTUP_COMPLETE) {
        String msg = "Cannot process data retrieval requests for PV " + pvName
                + " until the appliance has completely started up.";
        logger.error(msg);
        resp.addHeader(MimeResponse.ACCESS_CONTROL_ALLOW_ORIGIN, "*");
        resp.sendError(HttpServletResponse.SC_SERVICE_UNAVAILABLE, msg);
        return;
    }

    String startTimeStr = req.getParameter("from");
    String endTimeStr = req.getParameter("to");
    boolean useReduced = false;
    String useReducedStr = req.getParameter("usereduced");
    if (useReducedStr != null && !useReducedStr.equals("")) {
        try {
            useReduced = Boolean.parseBoolean(useReducedStr);
        } catch (Exception ex) {
            logger.error("Exception parsing usereduced", ex);
            useReduced = false;
        }
    }
    // The mime/extension (e.g. ".json", ".raw") comes from the path, not a parameter.
    String extension = req.getPathInfo().split("\\.")[1];
    logger.info("Mime is " + extension);

    boolean useChunkedEncoding = true;
    String doNotChunkStr = req.getParameter("donotchunk");
    if (doNotChunkStr != null && !doNotChunkStr.equals("false")) {
        logger.info("Turning off HTTP chunked encoding");
        useChunkedEncoding = false;
    }

    boolean fetchLatestMetadata = false;
    String fetchLatestMetadataStr = req.getParameter("fetchLatestMetadata");
    if (fetchLatestMetadataStr != null && fetchLatestMetadataStr.equals("true")) {
        logger.info("Adding a call to the engine to fetch the latest metadata");
        fetchLatestMetadata = true;
    }

    // For data retrieval we need a PV info. However, in case of PV's that have long since retired, we may not want to have PVTypeInfo's in the system.
    // So, we support a template PV that lays out the data sources.
    // During retrieval, you can pass in the PV as a template and we'll clone this and make a temporary copy.
    String retiredPVTemplate = req.getParameter("retiredPVTemplate");

    if (pvName == null) {
        String msg = "PV name is null.";
        resp.addHeader(MimeResponse.ACCESS_CONTROL_ALLOW_ORIGIN, "*");
        resp.sendError(HttpServletResponse.SC_BAD_REQUEST, msg);
        return;
    }

    if (pvName.equals(ARCH_APPL_PING_PV)) {
        logger.debug("Processing ping PV - this is used to validate the connection with the client.");
        processPingPV(req, resp);
        return;
    }

    // ".VAL" is the default field; treat "pv.VAL" the same as "pv".
    if (pvName.endsWith(".VAL")) {
        int len = pvName.length();
        pvName = pvName.substring(0, len - 4);
        logger.info("Removing .VAL from pvName for request giving " + pvName);
    }

    // ISO datetimes are of the form "2011-02-02T08:00:00.000Z"
    Timestamp end = TimeUtils.plusHours(TimeUtils.now(), 1);
    if (endTimeStr != null) {
        try {
            end = TimeUtils.convertFromISO8601String(endTimeStr);
        } catch (IllegalArgumentException ex) {
            try {
                end = TimeUtils.convertFromDateTimeStringWithOffset(endTimeStr);
            } catch (IllegalArgumentException ex2) {
                String msg = "Cannot parse time " + endTimeStr;
                logger.warn(msg, ex2);
                resp.addHeader(MimeResponse.ACCESS_CONTROL_ALLOW_ORIGIN, "*");
                resp.sendError(HttpServletResponse.SC_BAD_REQUEST, msg);
                return;
            }
        }
    }

    // We get one day by default
    Timestamp start = TimeUtils.minusDays(end, 1);
    if (startTimeStr != null) {
        try {
            start = TimeUtils.convertFromISO8601String(startTimeStr);
        } catch (IllegalArgumentException ex) {
            try {
                start = TimeUtils.convertFromDateTimeStringWithOffset(startTimeStr);
            } catch (IllegalArgumentException ex2) {
                String msg = "Cannot parse time " + startTimeStr;
                logger.warn(msg, ex2);
                resp.addHeader(MimeResponse.ACCESS_CONTROL_ALLOW_ORIGIN, "*");
                resp.sendError(HttpServletResponse.SC_BAD_REQUEST, msg);
                return;
            }
        }
    }

    if (end.before(start)) {
        String msg = "For request, end " + end.toString() + " is before start " + start.toString() + " for pv "
                + pvName;
        logger.error(msg);
        resp.addHeader(MimeResponse.ACCESS_CONTROL_ALLOW_ORIGIN, "*");
        // Include the message so the client sees why the request was rejected.
        resp.sendError(HttpServletResponse.SC_BAD_REQUEST, msg);
        return;
    }

    LinkedList<TimeSpan> requestTimes = new LinkedList<TimeSpan>();

    // We can specify a list of time stamp pairs using the optional timeranges parameter
    String timeRangesStr = req.getParameter("timeranges");
    if (timeRangesStr != null) {
        boolean continueWithRequest = parseTimeRanges(resp, pvName, requestTimes, timeRangesStr);
        if (!continueWithRequest) {
            // Cannot parse the time ranges properly, so we abort the request.
            return;
        }

        // Override the start and the end so that the mergededup consumer works correctly.
        start = requestTimes.getFirst().getStartTime();
        end = requestTimes.getLast().getEndTime();

    } else {
        requestTimes.add(new TimeSpan(start, end));
    }

    assert (requestTimes.size() > 0);

    // Function-call syntax: "pp(pvName,arg1,arg2)" selects a post processor with arguments.
    String postProcessorUserArg = req.getParameter("pp");
    if (pvName.contains("(")) {
        if (!pvName.contains(")")) {
            String msg = "Unbalanced parentheses in " + pvName;
            logger.error(msg);
            resp.addHeader(MimeResponse.ACCESS_CONTROL_ALLOW_ORIGIN, "*");
            resp.sendError(HttpServletResponse.SC_BAD_REQUEST, msg);
            return;
        }
        String[] components = pvName.split("[(,)]");
        postProcessorUserArg = components[0];
        pvName = components[1];
        if (components.length > 2) {
            // Re-join any extra arguments using '_' as the post processors expect.
            for (int i = 2; i < components.length; i++) {
                postProcessorUserArg = postProcessorUserArg + "_" + components[i];
            }
        }
        logger.info("After parsing the function call syntax pvName is " + pvName
                + " and postProcessorUserArg is " + postProcessorUserArg);
    }

    PostProcessor postProcessor = PostProcessors.findPostProcessor(postProcessorUserArg);

    PVTypeInfo typeInfo = PVNames.determineAppropriatePVTypeInfo(pvName, configService);
    pmansProfiler.mark("After PVTypeInfo");

    if (typeInfo == null && RetrievalState.includeExternalServers(req)) {
        logger.debug("Checking to see if pv " + pvName + " is served by a external Archiver Server");
        typeInfo = checkIfPVisServedByExternalServer(pvName, start, req, resp, useChunkedEncoding);
    }

    if (typeInfo == null) {
        if (resp.isCommitted()) {
            // The external-server check already proxied the data; nothing more to do.
            logger.debug("Proxied the data thru an external server for PV " + pvName);
            return;
        }
    }

    if (typeInfo == null) {
        if (retiredPVTemplate != null) {
            PVTypeInfo templateTypeInfo = PVNames.determineAppropriatePVTypeInfo(retiredPVTemplate,
                    configService);
            if (templateTypeInfo != null) {
                typeInfo = new PVTypeInfo(pvName, templateTypeInfo);
                typeInfo.setPaused(true);
                typeInfo.setApplianceIdentity(configService.getMyApplianceInfo().getIdentity());
                // Somehow tell the code downstream that this is a fake typeInfo.
                typeInfo.setSamplingMethod(SamplingMethod.DONT_ARCHIVE);
                logger.debug("Using a template PV for " + pvName + " Need to determine the actual DBR type.");
                setActualDBRTypeFromData(pvName, typeInfo, configService);
            }
        }
    }

    if (typeInfo == null) {
        logger.error("Unable to find typeinfo for pv " + pvName);
        resp.addHeader(MimeResponse.ACCESS_CONTROL_ALLOW_ORIGIN, "*");
        resp.sendError(HttpServletResponse.SC_NOT_FOUND);
        return;
    }

    if (postProcessor == null) {
        if (useReduced) {
            String defaultPPClassName = configService.getInstallationProperties().getProperty(
                    "org.epics.archiverappliance.retrieval.DefaultUseReducedPostProcessor",
                    FirstSamplePP.class.getName());
            logger.debug("Using the default usereduced preprocessor " + defaultPPClassName);
            try {
                postProcessor = (PostProcessor) Class.forName(defaultPPClassName).newInstance();
            } catch (Exception ex) {
                logger.error("Exception constructing new instance of post processor " + defaultPPClassName, ex);
                // Fall through to the raw post processor below.
                postProcessor = null;
            }
        }
    }

    if (postProcessor == null) {
        logger.debug("Using the default raw preprocessor");
        postProcessor = new DefaultRawPostProcessor();
    }

    ApplianceInfo applianceForPV = configService.getApplianceForPV(pvName);
    if (applianceForPV == null) {
        // TypeInfo cannot be null here...
        assert (typeInfo != null);
        applianceForPV = configService.getAppliance(typeInfo.getApplianceIdentity());
    }

    if (!applianceForPV.equals(configService.getMyApplianceInfo())) {
        // Data for pv is elsewhere. Proxy/redirect and return.
        proxyRetrievalRequest(req, resp, pvName, useChunkedEncoding,
                applianceForPV.getRetrievalURL() + "/../data");
        return;
    }

    pmansProfiler.mark("After Appliance Info");

    // Remember the name as requested; field requests are served from the main PV's stream.
    String pvNameFromRequest = pvName;

    String fieldName = PVNames.getFieldName(pvName);
    if (fieldName != null && !fieldName.equals("") && !pvName.equals(typeInfo.getPvName())) {
        logger.debug("We reset the pvName " + pvName + " to one from the typeinfo " + typeInfo.getPvName()
                + " as that determines the name of the stream. Also using ExtraFieldsPostProcessor");
        pvName = typeInfo.getPvName();
        postProcessor = new ExtraFieldsPostProcessor(fieldName);
    }

    try {
        // Postprocessors get their mandatory arguments from the request.
        // If user does not pass in the expected request, throw an exception.
        postProcessor.initialize(postProcessorUserArg, pvName);
    } catch (Exception ex) {
        logger.error("Postprocessor threw an exception during initialization for " + pvName, ex);
        resp.addHeader(MimeResponse.ACCESS_CONTROL_ALLOW_ORIGIN, "*");
        resp.sendError(HttpServletResponse.SC_NOT_FOUND);
        return;
    }

    try (BasicContext retrievalContext = new BasicContext(typeInfo.getDBRType(), pvNameFromRequest);
            MergeDedupConsumer mergeDedupCountingConsumer = createMergeDedupConsumer(resp, extension,
                    useChunkedEncoding);
            RetrievalExecutorResult executorResult = determineExecutorForPostProcessing(pvName, typeInfo,
                    requestTimes, req, postProcessor)) {
        HashMap<String, String> engineMetadata = null;
        if (fetchLatestMetadata) {
            // Make a call to the engine to fetch the latest metadata.
            engineMetadata = fetchLatestMedataFromEngine(pvName, applianceForPV);
        }

        LinkedList<Future<RetrievalResult>> retrievalResultFutures = resolveAllDataSources(pvName, typeInfo,
                postProcessor, applianceForPV, retrievalContext, executorResult, req, resp);
        pmansProfiler.mark("After data source resolution");

        long s1 = System.currentTimeMillis();
        String currentlyProcessingPV = null;

        List<Future<EventStream>> eventStreamFutures = getEventStreamFuturesFromRetrievalResults(executorResult,
                retrievalResultFutures);

        logger.debug(
                "Done with the RetrievalResult's; moving onto the individual event stream from each source for "
                        + pvName);
        pmansProfiler.mark("After retrieval results");

        for (Future<EventStream> future : eventStreamFutures) {
            EventStreamDesc sourceDesc = null;
            try (EventStream eventStream = future.get()) {
                sourceDesc = null; // Reset it for each loop iteration.
                sourceDesc = eventStream.getDescription();
                if (sourceDesc == null) {
                    logger.warn("Skipping event stream without a desc for pv " + pvName);
                    continue;
                }

                logger.debug("Processing event stream for pv " + pvName + " from source "
                        + ((eventStream.getDescription() != null) ? eventStream.getDescription().getSource()
                                : " unknown"));

                try {
                    mergeTypeInfo(typeInfo, sourceDesc, engineMetadata);
                } catch (MismatchedDBRTypeException mex) {
                    logger.error(mex.getMessage(), mex);
                    continue;
                }

                if (currentlyProcessingPV == null || !currentlyProcessingPV.equals(pvName)) {
                    logger.debug("Switching to new PV " + pvName
                            + " In some mime responses we insert special headers at the beginning of the response. Calling the hook for that");
                    currentlyProcessingPV = pvName;
                    mergeDedupCountingConsumer.processingPV(currentlyProcessingPV, start, end,
                            (eventStream != null) ? sourceDesc : null);
                }

                try {
                    // If the postProcessor does not have a consolidated event stream, we send each eventstream across as we encounter it.
                    // Else we send the consolidatedEventStream down below.
                    if (!(postProcessor instanceof PostProcessorWithConsolidatedEventStream)) {
                        mergeDedupCountingConsumer.consumeEventStream(eventStream);
                        resp.flushBuffer();
                    }
                } catch (Exception ex) {
                    if (ex.toString() != null && ex.toString().contains("ClientAbortException")) {
                        // We check for ClientAbortException etc this way to avoid including tomcat jars in the build path.
                        logger.debug(
                                "Exception when consuming and flushing data from " + sourceDesc.getSource(),
                                ex);
                    } else {
                        logger.error("Exception when consuming and flushing data from " + sourceDesc.getSource()
                                + "-->" + ex.toString(), ex);
                    }
                }
                pmansProfiler.mark("After event stream " + eventStream.getDescription().getSource());
            } catch (Exception ex) {
                if (ex.toString() != null && ex.toString().contains("ClientAbortException")) {
                    // We check for ClientAbortException etc this way to avoid including tomcat jars in the build path.
                    logger.debug("Exception when consuming and flushing data from "
                            + (sourceDesc != null ? sourceDesc.getSource() : "N/A"), ex);
                } else {
                    logger.error("Exception when consuming and flushing data from "
                            + (sourceDesc != null ? sourceDesc.getSource() : "N/A") + "-->" + ex.toString(),
                            ex);
                }
            }
        }

        if (postProcessor instanceof PostProcessorWithConsolidatedEventStream) {
            try (EventStream eventStream = ((PostProcessorWithConsolidatedEventStream) postProcessor)
                    .getConsolidatedEventStream()) {
                EventStreamDesc sourceDesc = eventStream.getDescription();
                if (sourceDesc == null) {
                    logger.error("Skipping event stream without a desc for pv " + pvName
                            + " and post processor " + postProcessor.getExtension());
                } else {
                    mergeDedupCountingConsumer.consumeEventStream(eventStream);
                    resp.flushBuffer();
                }
            }
        }

        // If the postProcessor needs to send final data across, give it a chance now...
        if (postProcessor instanceof AfterAllStreams) {
            EventStream finalEventStream = ((AfterAllStreams) postProcessor).anyFinalData();
            if (finalEventStream != null) {
                mergeDedupCountingConsumer.consumeEventStream(finalEventStream);
                resp.flushBuffer();
            }
        }

        pmansProfiler.mark("After writing all eventstreams to response");

        long s2 = System.currentTimeMillis();
        logger.info("For the complete request, found a total of "
                + mergeDedupCountingConsumer.totalEventsForAllPVs + " in " + (s2 - s1) + "(ms)" + " skipping "
                + mergeDedupCountingConsumer.skippedEventsForAllPVs + " events" + " deduping involved "
                + mergeDedupCountingConsumer.comparedEventsForAllPVs + " compares.");
    } catch (Exception ex) {
        if (ex.toString() != null && ex.toString().contains("ClientAbortException")) {
            // We check for ClientAbortException etc this way to avoid including tomcat jars in the build path.
            logger.debug("Exception when retrieving data ", ex);
        } else {
            logger.error("Exception when retrieving data " + "-->" + ex.toString(), ex);
        }
    }
    pmansProfiler.mark("After all closes and flushing all buffers");

    // Till we determine all the if conditions where we log this, we log sparingly..
    if (pmansProfiler.totalTimeMS() > 5000) {
        logger.error("Retrieval time for " + pvName + " from " + startTimeStr + " to " + endTimeStr
                + pmansProfiler.toString());
    }
}

From source file:edu.harvard.iq.dvn.ingest.dsb.impl.DvnRJobRequest.java

/**
 * Parses a comma-separated list of value/range tokens into a normalized form.
 *
 * <p>Each token may be a single value ({@code 1}), a negation ({@code !=x}), or a
 * range with optional bracket/parenthesis operators on either end
 * (e.g. {@code [3-4]}, {@code (-9--3]}, {@code 6-}). A token is treated as a range
 * only when at least one range operator ({@code [}, {@code (}, {@code ]}, {@code )})
 * is present; bare dashes alone do not trigger range parsing.
 *
 * @param tkn the raw token string, e.g. {@code "[3-4],6-,!=x"}
 * @return a list of 4-element lists: {start-op code, lower value, end-op code, upper value},
 *         where the op codes are: 3 = inclusive start, 5 = exclusive start,
 *         4 = inclusive end, 6 = exclusive end, 2 = negation
 */
public List<List<String>> getValueRange(String tkn) {

    dbgLog.fine("received token=" + tkn);
    String step0 = StringUtils.strip(tkn);
    dbgLog.fine("step0=" + step0);

    // string into tokens
    String[] step1raw = step0.split(",");

    dbgLog.fine("step1raw=" + StringUtils.join(step1raw, ","));

    // remove meaningless commas if exist

    List<String> step1 = new ArrayList<String>();

    for (String el : step1raw) {
        if (!el.equals("")) {
            step1.add(el);
        }
    }

    dbgLog.fine("step1=" + StringUtils.join(step1, ","));

    List<List<String>> rangeData = new ArrayList<List<String>>();

    // for each token, check the range operator

    for (int i = 0; i < step1.size(); i++) {
        // Split the token into individual characters for positional inspection.
        LinkedList<String> tmp = new LinkedList<String>(
                Arrays.asList(String2StringArray(String.valueOf(step1.get(i)))));

        Map<String, String> token = new HashMap<String, String>();
        boolean rangeMode = false;

        // .get(i) below CAN'T possibly be right (??) -- replacing
        // it with .get(0). -- L.A., v3.6
        //if ((!tmp.get(i).equals("[")) && (!tmp.get(i).equals("("))){
        if ((!tmp.get(0).equals("[")) && (!tmp.get(0).equals("("))) {
            // no LHS range operator
            // assume [
            token.put("start", "3");
        } else if (tmp.get(0).equals("[")) {
            rangeMode = true;
            token.put("start", "3");
            tmp.removeFirst();
        } else if (tmp.get(0).equals("(")) {
            rangeMode = true;
            token.put("start", "5");
            tmp.removeFirst();
        }

        if ((!tmp.getLast().equals("]")) && (!tmp.getLast().equals(")"))) {
            // no RHS range operator
            // assume ]
            token.put("end", "4");
        } else if (tmp.getLast().equals("]")) {
            rangeMode = true;
            tmp.removeLast();
            token.put("end", "4");
        } else if (tmp.getLast().equals(")")) {
            rangeMode = true;
            tmp.removeLast();
            token.put("end", "6");
        }

        // I'm now enforcing the following rules:
        // the "rangeMode" above - a range must have at least one range
        // operator, a square bracket or parenthesis, on one end, at
        // least; i.e., either on the left, or on the right. 
        // If there are no range operators, even if there are dashes
        // inside the token, they are not going to be interpreted as 
        // range definitions.  
        // still TODO: (possibly?) add more validation; figure out how 
        // to encode *date* ranges ("-" is not optimal, since dates already
        // contain dashes... although, since dates are (supposed to be) 
        // normalized it should still be possible to parse it unambiguously)
        //          -- L.A., v3.6

        if (rangeMode) {
            // after these steps, the string does not have range operators;
            // i.e., '-9--3', '--9', '-9-','-9', '-1-1', '1', '3-4', '6-'

            if ((tmp.get(0).equals("!")) && (tmp.get(1).equals("="))) {
                // != negation string is found
                token.put("start", "2");
                token.put("end", "");
                token.put("v1", StringUtils.join(tmp.subList(2, tmp.size()), ""));
                token.put("v2", "");
                dbgLog.fine("value=" + StringUtils.join(tmp.subList(2, tmp.size()), ","));

            } else if ((tmp.get(0).equals("-")) && (tmp.get(1).equals("-"))) {
                // type 2: --9
                token.put("v1", "");
                tmp.removeFirst();
                token.put("v2", StringUtils.join(tmp, ""));
            } else if ((tmp.get(0).equals("-")) && (tmp.getLast().equals("-"))) {
                // type 3: -9-
                token.put("v2", "");
                tmp.removeLast();
                token.put("v1", StringUtils.join(tmp, ""));
            } else if ((!tmp.get(0).equals("-")) && (tmp.getLast().equals("-"))) {
                // type 8: 6-
                token.put("v2", "");
                tmp.removeLast();
                token.put("v1", StringUtils.join(tmp, ""));
            } else {
                // Count the hyphens to disambiguate negative values from range separators.
                int count = 0;
                List<Integer> index = new ArrayList<Integer>();
                for (int j = 0; j < tmp.size(); j++) {
                    if (tmp.get(j).equals("-")) {
                        count++;
                        index.add(j);
                    }
                }

                if (count >= 2) {
                    // range type
                    // divide at the second hyphen
                    // types 1 and 5: -9--3, -1-1
                    // BUGFIX: v1 was never stored here (the assignment was left as
                    // commented-out pseudocode), so these ranges emitted a null lower bound.
                    token.put("v1", StringUtils.join(tmp.subList(0, index.get(1)), ""));
                    token.put("v2", StringUtils.join(tmp.subList((index.get(1) + 1), tmp.size()), ""));

                } else if (count == 1) {
                    if (tmp.get(0).equals("-")) {
                        // point negative type
                        // type 4: -9 or -inf,9
                        // do nothing
                        if ((token.get("start").equals("5"))
                                && ((token.get("end").equals("6")) || (token.get("end").equals("4")))) {
                            token.put("v1", "");
                            tmp.removeFirst();
                            token.put("v2", StringUtils.join(tmp, ""));
                        } else {
                            token.put("v1", StringUtils.join(tmp, ""));
                            token.put("v2", StringUtils.join(tmp, ""));
                        }
                    } else {
                        // type 7: 3-4
                        // both positive value and range type
                        String[] vset = (StringUtils.join(tmp, "")).split("-");
                        token.put("v1", vset[0]);
                        token.put("v2", vset[1]);
                    }

                } else {
                    // type 6: 1
                    token.put("v1", StringUtils.join(tmp, ""));
                    token.put("v2", StringUtils.join(tmp, ""));
                }
            }
        } else {
            // assume that this is NOT a range; treat the entire sequence 
            // of symbols as a single token:
            // type 6: 1
            token.put("v1", StringUtils.join(tmp, ""));
            token.put("v2", StringUtils.join(tmp, ""));
        }

        dbgLog.fine(i + "-th result=" + token.get("start") + "|" + token.get("v1") + "|" + token.get("end")
                + "|" + token.get("v2"));

        List<String> rangeSet = new ArrayList<String>();
        rangeSet.add(token.get("start"));
        rangeSet.add(token.get("v1"));
        rangeSet.add(token.get("end"));
        rangeSet.add(token.get("v2"));
        rangeData.add(rangeSet);

    }

    dbgLog.fine("rangeData:\n" + rangeData);
    return rangeData;
}