Example usage for java.util ListIterator previous

Introduction

On this page you can find example usages of java.util.ListIterator.previous().

Prototype

E previous();

Document

Returns the previous element in the list and moves the cursor position backwards.
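
Before the full examples below, here is a minimal, self-contained sketch (not taken from any of the source files listed on this page) of the typical pattern: position the cursor at the end of the list and walk backwards with hasPrevious() and previous().

import java.util.Arrays;
import java.util.List;
import java.util.ListIterator;

// Hypothetical demo class, not part of the source files listed below.
public class ListIteratorPreviousDemo {
    public static void main(String[] args) {
        List<String> items = Arrays.asList("a", "b", "c");
        // Start the cursor past the last element so the first call to previous() returns "c".
        ListIterator<String> it = items.listIterator(items.size());
        while (it.hasPrevious()) {
            System.out.println(it.previous()); // prints c, b, a
        }
    }
}

Most of the examples below follow the same pattern, obtaining the iterator with list.listIterator(list.size()) and iterating in reverse; a few instead call previous() right after next() to step the cursor back by one element.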

Usage

From source file:it.polito.tellmefirst.web.rest.clients.ClientEpub.java

private HashMap<String, String> parseEpub(File file) throws IOException, TMFVisibleException {

    LOG.debug("[parseEpub] - BEGIN");

    ZipFile fi = new ZipFile(file);

    for (Enumeration e = fi.entries(); e.hasMoreElements();) {
        ZipEntry entry = (ZipEntry) e.nextElement();
        if (entry.getName().endsWith("ncx")) {
            InputStream tocMaybeDirty = fi.getInputStream(entry);
            Scanner scanner = new Scanner(tocMaybeDirty, "UTF-8").useDelimiter("\\A");
            String theString = scanner.hasNext() ? scanner.next() : "";
            tocMaybeDirty.close();
            scanner.close();

            String res = theString.replaceAll(">[\\s]*?<", "><");

            InputStream toc = new ByteArrayInputStream(res.getBytes(StandardCharsets.UTF_8));

            try {
                DocumentBuilder dBuilder = DocumentBuilderFactory.newInstance().newDocumentBuilder();
                Document doc = dBuilder.parse(toc);
                toc.close();

                if (doc.hasChildNodes()) {
                    findNavMap(doc.getChildNodes());
                }
            } catch (Exception ex) {
                LOG.error("Unable to navigate the TOC");
            }

            removeEmptyTOC(epub);

            //search anchors in links and split
            Set set = epub.entrySet();
            Iterator i = set.iterator();
            while (i.hasNext()) {
                Map.Entry me = (Map.Entry) i.next();
                if (me.getValue().toString().contains("#")) {
                    String[] parts = me.getValue().toString().split("#");
                    String anchor = parts[1];
                    epub.put(me.getKey().toString(), anchor);
                }
            }
        }
        if (entry.getName().endsWith("opf")) { //manage files because order is important
            InputStream content = fi.getInputStream(entry);

            Scanner scanner = new Scanner(content, "UTF-8").useDelimiter("\\A");
            String contentString = scanner.hasNext() ? scanner.next() : "";
            content.close();
            scanner.close();

            String filenameRegex = "href=\"(.*.htm(|l))\".*media-type=\"application/xhtml";
            Pattern pattern = Pattern.compile(filenameRegex);
            Matcher matcher = pattern.matcher(contentString);

            Integer count = 0;
            while (matcher.find()) {
                files.put(count, matcher.group(1));
                count++;
            }
        }
        if (entry.getName().endsWith("html") || entry.getName().endsWith("htm")
                || entry.getName().endsWith("xhtml")) {
            InputStream htmlFile = fi.getInputStream(entry);

            Scanner scanner = new Scanner(htmlFile, "UTF-8").useDelimiter("\\A");
            String htmlString = scanner.hasNext() ? scanner.next() : "";

            String regex1 = htmlString.replaceAll("^[^_]*?<body>", ""); //remove head
            String regex2 = regex1.replaceAll("</body>.*$", ""); //remove tail
            String htmlCleaned = regex2.replaceAll("<a.*?/>", ""); //anchor with one tag

            String[] bits = entry.getName().split("/");
            String fileName = bits[bits.length - 1];

            htmls.put(fileName, htmlCleaned);
        }
    }
    fi.close();
    Integer i;
    for (i = 0; i < files.size(); i++) {
        stringBuilder.append("<p id=\"" + files.get(i) + "\"></p>"); // "anchor" also the heads of each files
        stringBuilder.append(htmls.get(files.get(i)));
    }
    String htmlAll = stringBuilder.toString();

    /* We have all the needed files, start to split.
       For each link -> make a chunk.
       Start from the bottom. */
    Metadata metadata = new Metadata();
    Parser parser = new HtmlParser();
    ListIterator<Map.Entry<String, String>> iter = new ArrayList<>(epub.entrySet()).listIterator(epub.size());

    while (iter.hasPrevious()) {
        Map.Entry<String, String> me = iter.previous();
        try {
            ContentHandler contenthandler = new BodyContentHandler(10 * htmlAll.length());
            Scanner sc = new Scanner(htmlAll);
            sc.useDelimiter("id=\"" + me.getValue().toString() + "\">");
            htmlAll = sc.next();
            InputStream stream = new ByteArrayInputStream(sc.next().getBytes(StandardCharsets.UTF_8));
            parser.parse(stream, contenthandler, metadata, new ParseContext());
            String chapterText = contenthandler.toString().toLowerCase().replaceAll("\\d+.*", "");
            String chapterTextWithoutNo = chapterText.replaceAll("\\d+.*", "");
            // Remove the Project Gutenberg meta information from the text
            String chapterTextCleaned = chapterTextWithoutNo.split("end of the project gutenberg ebook")[0];
            epub.put(me.getKey().toString(), chapterTextCleaned);

        } catch (Exception ex) {
            LOG.error("Unable to parse content for index: " + me.getKey() + ", this chapter will be deleted");
            removeChapter(epub, me.getKey().toString());
        }
    }

    /* Remove the Project Gutenberg license chapter from the Map, because it is useless
       for classification and it triggers a Lucene exception for the Italian language
       (the license text is always in English).

       You can use this method to remove any chapter that is useless for classifying
       your Epub document. */
    removeChapter(epub, "A Word from Project Gutenberg");
    removeEmptyItems(epub);

    // If the Epub file has a bad structure, try to use the basic Epub extractor of Tika.
    if (epub.size() == 0) {
        LOG.info("The Epub file has a bad structure. Try to use the Tika extractor");
        epub.put("All text", autoParseAll(file));
    }

    removeEmptyItems(epub);

    if (epub.size() == 0) {
        LOG.error("Unable to extract text from this Epub");
        throw new TMFVisibleException("Unable to extract any text from this Epub.");
    }

    removeDownloadedFile(TEMPORARY_PATH);

    LOG.debug("[parseEpub] - END");

    return epub;
}

From source file:org.apache.jmeter.report.processor.ExternalSampleSorter.java

private List<Sample> merge(List<Sample> left, List<Sample> right) {
    ArrayList<Sample> out = new ArrayList<>();
    ListIterator<Sample> l = left.listIterator();
    ListIterator<Sample> r = right.listIterator();
    while (l.hasNext() || r.hasNext()) {
        if (l.hasNext() && r.hasNext()) {
            Sample firstLeft = l.next();
            Sample firstRight = r.next();
            if (!revertedSort && sampleComparator.compare(firstLeft, firstRight) < 0
                    || revertedSort && sampleComparator.compare(firstLeft, firstRight) >= 0) {
                out.add(firstLeft);
                r.previous();
            } else {
                out.add(firstRight);
                l.previous();
            }
        } else if (l.hasNext()) {
            out.add(l.next());
        } else if (r.hasNext()) {
            out.add(r.next());
        }
    }
    return out;
}

From source file:org.springframework.ldap.core.DistinguishedName.java

/**
 * Determines if this <code>DistinguishedName</code> ends with a certain
 * path.
 * 
 * If the argument path is empty (no names in path) this method will return
 * <code>false</code>.
 * 
 * @param name The suffix to check for.
 * 
 */
public boolean endsWith(Name name) {
    DistinguishedName path = null;
    if (name instanceof DistinguishedName) {
        path = (DistinguishedName) name;
    } else {
        return false;
    }

    List shortlist = path.getNames();

    // this path must be at least as long
    if (getNames().size() < shortlist.size())
        return false;

    // must have names
    if (shortlist.size() == 0)
        return false;

    ListIterator longiter = getNames().listIterator(getNames().size());
    ListIterator shortiter = shortlist.listIterator(shortlist.size());

    while (shortiter.hasPrevious()) {
        LdapRdn longname = (LdapRdn) longiter.previous();
        LdapRdn shortname = (LdapRdn) shortiter.previous();

        if (!longname.equals(shortname))
            return false;
    }

    // if short list ended, all were equal
    return true;

}

From source file:edu.umn.msi.tropix.persistence.dao.hibernate.TropixObjectDaoImpl.java

public TropixObject getGroupDirectoryPath(final String userId, final List<String> pathParts) {
    final StringBuilder joins = new StringBuilder(), wheres = new StringBuilder();
    final ListIterator<String> pathPartsIter = pathParts.listIterator(pathParts.size());
    final LinkedList<String> parameters = Lists.newLinkedList();
    while (pathPartsIter.hasPrevious()) {
        int index = pathPartsIter.previousIndex();
        final String pathPart = pathPartsIter.previous();
        wheres.append(String.format(" and o%d.deletedTime is null", index));
        wheres.append(String.format(" and o%d.committed is true", index));
        addConstraintForPathPart(pathPart, index, wheres, parameters);
        if (pathPartsIter.hasPrevious()) {
            int nextObjectBackIndex = pathPartsIter.previousIndex();
            joins.append(
                    String.format(" inner join o%d.permissionParents as o%d ", index, nextObjectBackIndex));
        }
    }

    final int lastIndex = pathParts.size() - 1;
    final String objectType = lastIndex == 0 ? "Folder" : "TropixObject";
    final String queryString = String.format(
            "%s o%d %s inner join o0.permissions p left join p.users u left join p.groups g left join g.users gu where (u.cagridId = :userId or gu.cagridId = :userId) and o0.parentFolder is null %s and o0.class is Folder",
            objectType, lastIndex, joins.toString(), wheres.toString());
    return executePathQuery(userId, String.format("o%d", lastIndex), queryString, 0, parameters);
}

From source file:edu.umn.msi.tropix.persistence.dao.hibernate.TropixObjectDaoImpl.java

public TropixObject getHomeDirectoryPath(final String userId, final List<String> pathParts) {
    if (LOG.isDebugEnabled()) {
        LOG.debug(String.format("getPath called with userId %s and path parts %s", userId,
                Iterables.toString(pathParts)));
    }
    final StringBuilder joins = new StringBuilder(), wheres = new StringBuilder();
    final ListIterator<String> pathPartsIter = pathParts.listIterator(pathParts.size());
    final LinkedList<String> parameters = Lists.newLinkedList();
    while (pathPartsIter.hasPrevious()) {
        int index = pathPartsIter.previousIndex() + 1;
        final String pathPart = pathPartsIter.previous();

        int nextObjectBackIndex = pathPartsIter.previousIndex() + 1;
        joins.append(String.format(" inner join o%d.permissionParents as o%d ", index, nextObjectBackIndex));
        wheres.append(String.format(" and o%d.deletedTime is null", index));
        wheres.append(String.format(" and o%d.committed is true", index));
        addConstraintForPathPart(pathPart, index, wheres, parameters);
    }

    final String queryString = String.format(
            "User u, TropixObject o%d %s where u.cagridId = :userId %s and u.homeFolder.id = o0.id",
            pathParts.size(), joins.toString(), wheres.toString());
    return executePathQuery(userId, String.format("o%d", pathParts.size()), queryString, 1, parameters);
}

From source file:org.commonjava.maven.ext.core.impl.DependencyManipulator.java

/**
 * This will load the remote overrides. It will first try to load any overrides that might have
 * been prepopulated by the REST scanner, failing that it will load from a remote POM file.
 *
 * @return the loaded overrides
 * @throws ManipulationException if an error occurs.
 */
private Map<ArtifactRef, String> loadRemoteOverrides() throws ManipulationException {
    final DependencyState depState = session.getState(DependencyState.class);
    final RESTState restState = session.getState(RESTState.class);
    final List<ProjectVersionRef> gavs = depState.getRemoteBOMDepMgmt();

    // While in theory we are only mapping ProjectRef -> NewVersion if we store key as ProjectRef we can't then have
    // org.foo:foobar -> 1.2.0.redhat-2
    // org.foo:foobar -> 2.0.0.redhat-2
    // Which is useful for strictAlignment scenarios (although undefined for non-strict).
    Map<ArtifactRef, String> restOverrides = depState.getRemoteRESTOverrides();
    Map<ArtifactRef, String> bomOverrides = new LinkedHashMap<>();
    Map<ArtifactRef, String> mergedOverrides = new LinkedHashMap<>();

    if (gavs != null) {
        final ListIterator<ProjectVersionRef> iter = gavs.listIterator(gavs.size());
        // Iterate in reverse order so that the first GAV in the list overwrites the last
        while (iter.hasPrevious()) {
            final ProjectVersionRef ref = iter.previous();
            Map<ArtifactRef, String> rBom = effectiveModelBuilder.getRemoteDependencyVersionOverrides(ref);

            // We don't normalise the BOM list here as ::applyOverrides can handle multiple GA with different V
            // for strict override. However, it is undefined if strict is not enabled.
            bomOverrides.putAll(rBom);
        }
    }

    if (depState.getPrecedence() == DependencyPrecedence.BOM) {
        mergedOverrides = bomOverrides;
        if (mergedOverrides.isEmpty()) {
            String msg = restState.isEnabled() ? "dependencySource for restURL" : "dependencyManagement";

            logger.warn("No dependencies found for dependencySource {}. Has {} been configured? ",
                    depState.getPrecedence(), msg);
        }
    }
    if (depState.getPrecedence() == DependencyPrecedence.REST) {
        mergedOverrides = restOverrides;
        if (mergedOverrides.isEmpty()) {
            logger.warn("No dependencies found for dependencySource {}. Has restURL been configured? ",
                    depState.getPrecedence());
        }
    } else if (depState.getPrecedence() == DependencyPrecedence.RESTBOM) {
        mergedOverrides = bomOverrides;

        removeDuplicateArtifacts(mergedOverrides, restOverrides);
        mergedOverrides.putAll(restOverrides);
    } else if (depState.getPrecedence() == DependencyPrecedence.BOMREST) {
        mergedOverrides = restOverrides;
        removeDuplicateArtifacts(mergedOverrides, bomOverrides);
        mergedOverrides.putAll(bomOverrides);
    }
    logger.info("Remote precedence is {}", depState.getPrecedence());
    logger.debug("Final remote override list is {}", mergedOverrides);
    return mergedOverrides;
}

From source file:com.projity.pm.criticalpath.CriticalPath.java

private void doPass(Task startTask, TaskSchedule.CalculationContext context) {
    if (startTask != null) {
        startTask.getSchedule(context.scheduleType).invalidate();
        startTask.setCalculationStateCount(getCalculationStateCount());
    }

    PredecessorTaskList.TaskReference taskReference;
    boolean forward = context.forward;
    ListIterator i = forward ? predecessorTaskList.listIterator() : predecessorTaskList.reverseIterator();
    Task task;
    TaskSchedule schedule;

    //      int count = 0;
    //      long z = System.currentTimeMillis();
    boolean projectForward = project.isForward();
    while (forward ? i.hasNext() : i.hasPrevious()) {
        taskReference = (PredecessorTaskList.TaskReference) (forward ? i.next() : i.previous());
        traceTask = task = taskReference.getTask();
        context.taskReferenceType = taskReference.getType();
        schedule = task.getSchedule(context.scheduleType);
        if (!forward)
            context.taskReferenceType = -taskReference.getType();

        if (task.isReverseScheduled()) { // reverse scheduled tasks must always be calculated
            schedule.invalidate();
            task.setCalculationStateCount(context.stateCount);
        }
        if (task.getCalculationStateCount() >= context.stateCount) {
            schedule.calcDates(context);
            if (context.assign && (projectForward || !task.isWbsParent())) { // in reverse scheduling, I see some parents have 0 or 1 as their dates. This is a workaround.
                if (schedule.getBegin() != 0L && !isSentinel(task))
                    earliestStart = Math.min(earliestStart, schedule.getStart());
                if (schedule.getEnd() != 0 && !isSentinel(task))
                    latestFinish = Math.max(latestFinish, schedule.getFinish());
            }

            //            schedule.dump();
        }
    }
    //      System.out.println("pass forward=" + forward + " tasks:" + count + " time " + (System.currentTimeMillis() -z) + " ms");
}

From source file:com.projity.pm.graphic.spreadsheet.common.CommonSpreadSheet.java

/**
 * Used by the find dialog.
 */
public boolean findNext(SearchContext context) {
    SpreadSheetSearchContext ctx = (SpreadSheetSearchContext) context;

    int row = this.getCurrentRow();
    // make sure in bounds
    if (row < 0)
        row = 0;
    if (row >= getCache().getSize())
        row = getCache().getSize() - 1;

    ListIterator i = getCache().getIterator(row);
    if (ctx.getRow() != -1) { // after the first search, need to move ahead or back
        if (ctx.isForward())
            if (i.hasNext())
                i.next();
            else if (i.hasPrevious())
                i.previous();
    }

    boolean found = false;
    GraphicNode gnode = null;
    Object obj;
    Node node;
    while (ctx.isForward() ? i.hasNext() : i.hasPrevious()) {
        gnode = (GraphicNode) (ctx.isForward() ? i.next() : i.previous());
        if (gnode.isVoid())
            continue;
        node = gnode.getNode();
        obj = node.getImpl();
        if (ctx.matches(obj)) {
            found = true;
            break;
        }
    }
    if (found) {
        int r = getCache().getRowAt(gnode);
        int col = getFieldArray().indexOf(ctx.getField()) - 1;
        this.changeSelection(r, col, false, false);
        ctx.setRow(r);
    }
    return found;
}

From source file:org.apache.jsieve.commands.extensions.Log.java

/**
 * @see org.apache.jsieve.commands.AbstractCommand#executeBasic(MailAdapter,
 *      Arguments, Block, SieveContext)
 */
protected Object executeBasic(MailAdapter mail, Arguments arguments, Block block, SieveContext context)
        throws SieveException {
    String logLevel = null;
    String message = null;

    // First MAY be a tag argument of fatal, error, warn, info, debug or trace.
    // The default is info.
    final ListIterator<Argument> argumentsIter = arguments.getArgumentList().listIterator();
    boolean stop = false;

    // Tag processing
    while (!stop && argumentsIter.hasNext()) {
        final Argument argument = argumentsIter.next();
        if (argument instanceof TagArgument) {
            final String tag = ((TagArgument) argument).getTag();

            // LogLevel?
            if (null == logLevel && (tag.equals(FATAL_TAG) || tag.equals(ERROR_TAG) || tag.equals(WARN_TAG)
                    || tag.equals(INFO_TAG) || tag.equals(DEBUG_TAG) || tag.equals(TRACE_TAG)))
                logLevel = tag;
            else
                throw context.getCoordinate().syntaxException("Found unexpected TagArgument");
        } else {
            // Stop when a non-tag argument is encountered
            argumentsIter.previous();
            stop = true;
        }
    }

    // Next MUST be a String
    if (argumentsIter.hasNext()) {
        final Argument argument = argumentsIter.next();
        if (argument instanceof StringListArgument) {
            List<String> strings = ((StringListArgument) argument).getList();
            if (1 == strings.size())
                message = strings.get(0);
        }
    }
    if (null == message)
        throw context.getCoordinate().syntaxException("Expecting a String");

    // Everything else is an error
    if (argumentsIter.hasNext())
        throw context.getCoordinate().syntaxException("Found unexpected arguments");

    log(null == logLevel ? ":info" : logLevel, message, context);

    return null;
}

From source file:geogebra.common.kernel.implicit.AlgoIntersectImplicitpolys.java

private void insert(double[] pair) {
    ListIterator<double[]> it = valPairs.listIterator();
    double eps = 1E-3; //find good value...
    while (it.hasNext()) {
        double[] p = it.next();
        if (Kernel.isGreater(p[0], pair[0], eps)) {
            it.previous();
            break;
        }
        if (Kernel.isEqual(p[0], pair[0], eps)) {
            if (Kernel.isGreater(p[1], pair[1], eps)) {
                it.previous();
                break;
            }
            if (Kernel.isEqual(p[1], pair[1], eps))
                return; //do not add
        }
    }
    it.add(pair);
}