Example usage for java.util LinkedList listIterator

List of usage examples for java.util LinkedList listIterator

Introduction

This page collects usage examples for java.util.LinkedList.listIterator(int).

Prototype

public ListIterator<E> listIterator(int index) 

Document

Returns a list-iterator of the elements in this list (in proper sequence), starting at the specified position in the list.
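
Every example on this page passes list.size() as the index, which positions the iterator just past the last element so that hasPrevious()/previous() walk the list in reverse. A minimal self-contained sketch of that pattern:

import java.util.LinkedList;
import java.util.ListIterator;

public class ReverseIterationDemo {
    public static void main(String[] args) {
        LinkedList<String> items = new LinkedList<String>();
        items.add("first");
        items.add("second");
        items.add("third");

        // listIterator(items.size()) positions the cursor past the last
        // element, so hasPrevious()/previous() traverse the list in reverse.
        ListIterator<String> it = items.listIterator(items.size());
        while (it.hasPrevious()) {
            System.out.println(it.previous()); // prints: third, second, first
        }
    }
}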

Usage

From source file:com.projity.pm.graphic.frames.DocumentFrame.java

/**
 * Checks whether the currently selected row belongs to the main project.
 * Used to determine whether a subproject can be inserted; subprojects can
 * only be inserted into the master project.
 * @return true if the current row is in the main project
 */
public boolean isCurrentRowInMainProject() {
    CommonSpreadSheet spreadSheet = getTopSpreadSheet();
    if (spreadSheet == null)
        return true;
    int row = spreadSheet.getCurrentRow();
    if (row == -1)
        return true;
    Node current = spreadSheet.getCurrentRowNode();
    SpreadSheetModel model = (SpreadSheetModel) spreadSheet.getModel();
    LinkedList previousNodes = model.getPreviousVisibleNodesFromRow(row);
    if (previousNodes == null)
        return true;
    previousNodes.add(current); // treat current node first since going backwards
    ListIterator i = previousNodes.listIterator(previousNodes.size());
    while (i.hasPrevious()) {
        Object o = ((Node) i.previous()).getImpl();
        if (o instanceof Task) {
            if (((Task) o).isInSubproject())
                return false;
            return project == ((Task) o).getOwningProject();
        }
    }

    return true;
}
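
The backward scan with an early exit above can be distilled into a generic shape: walk from the end until the first element of a particular type turns up. Event and ErrorEvent below are hypothetical types used only for this sketch:

import java.util.LinkedList;
import java.util.ListIterator;

public class LatestOfTypeDemo {
    // Hypothetical event types, introduced only for this sketch.
    static class Event {}
    static class ErrorEvent extends Event {}

    // Scans backwards from the end and returns the most recent ErrorEvent,
    // or null if none exists -- the same early-exit shape as above.
    static ErrorEvent latestError(LinkedList<Event> events) {
        ListIterator<Event> it = events.listIterator(events.size());
        while (it.hasPrevious()) {
            Event e = it.previous();
            if (e instanceof ErrorEvent) {
                return (ErrorEvent) e;
            }
        }
        return null;
    }
}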

From source file:org.apache.fop.layoutmgr.table.TableContentLayoutManager.java

/**
 * Creates Knuth elements by iterating over a TableRowIterator.
 * @param iter TableRowIterator instance to fetch rows from
 * @param context Active LayoutContext
 * @param alignment alignment indicator
 * @param bodyType Indicates what kind of body is being processed
 *                  (BODY, HEADER or FOOTER)
 * @return An element list
 */
private LinkedList getKnuthElementsForRowIterator(TableRowIterator iter, LayoutContext context, int alignment,
        int bodyType) {
    LinkedList returnList = new LinkedList();
    EffRow[] rowGroup = iter.getNextRowGroup();
    // TODO homogenize the handling of keeps and breaks
    context.clearKeepsPending();
    context.setBreakBefore(Constants.EN_AUTO);
    context.setBreakAfter(Constants.EN_AUTO);
    Keep keepWithPrevious = Keep.KEEP_AUTO;
    int breakBefore = Constants.EN_AUTO;
    if (rowGroup != null) {
        RowGroupLayoutManager rowGroupLM = new RowGroupLayoutManager(getTableLM(), rowGroup, stepper);
        List nextRowGroupElems = rowGroupLM.getNextKnuthElements(context, alignment, bodyType);
        keepWithPrevious = keepWithPrevious.compare(context.getKeepWithPreviousPending());
        breakBefore = context.getBreakBefore();
        int breakBetween = context.getBreakAfter();
        returnList.addAll(nextRowGroupElems);
        while ((rowGroup = iter.getNextRowGroup()) != null) {
            rowGroupLM = new RowGroupLayoutManager(getTableLM(), rowGroup, stepper);

            //Note previous pending keep-with-next and clear the strength
            //(as the layout context is reused)
            Keep keepWithNextPending = context.getKeepWithNextPending();
            context.clearKeepWithNextPending();

            //Get elements for next row group
            nextRowGroupElems = rowGroupLM.getNextKnuthElements(context, alignment, bodyType);
            /*
             * The last break element produced by TableStepper (for the previous row
             * group) may be used to represent the break between the two row groups.
             * Its penalty value and break class must just be overridden by the
             * characteristics of the keep or break between the two.
             *
             * However, we mustn't forget that if the after border of the last row of
             * the row group is thicker in the normal case than in the trailing case,
             * an additional glue will be appended to the element list. So we may have
             * to go two steps backwards in the list.
             */

            //Determine keep constraints
            Keep keep = keepWithNextPending.compare(context.getKeepWithPreviousPending());
            context.clearKeepWithPreviousPending();
            keep = keep.compare(getTableLM().getKeepTogether());
            int penaltyValue = keep.getPenalty();
            int breakClass = keep.getContext();

            breakBetween = BreakUtil.compareBreakClasses(breakBetween, context.getBreakBefore());
            if (breakBetween != Constants.EN_AUTO) {
                penaltyValue = -KnuthElement.INFINITE;
                breakClass = breakBetween;
            }
            BreakElement breakElement;
            ListIterator elemIter = returnList.listIterator(returnList.size());
            ListElement elem = (ListElement) elemIter.previous();
            if (elem instanceof KnuthGlue) {
                breakElement = (BreakElement) elemIter.previous();
            } else {
                breakElement = (BreakElement) elem;
            }
            breakElement.setPenaltyValue(penaltyValue);
            breakElement.setBreakClass(breakClass);
            returnList.addAll(nextRowGroupElems);
            breakBetween = context.getBreakAfter();
        }
    }
    /*
     * The last break produced for the last row-group of this table part must be
     * removed, because the breaking after the table will be handled by TableLM.
     * Unless the element list ends with a glue, which must be kept to accurately
     * represent the content. In such a case the break is simply disabled by setting
     * its penalty to infinite.
     */
    ListIterator elemIter = returnList.listIterator(returnList.size());
    ListElement elem = (ListElement) elemIter.previous();
    if (elem instanceof KnuthGlue) {
        BreakElement breakElement = (BreakElement) elemIter.previous();
        breakElement.setPenaltyValue(KnuthElement.INFINITE);
    } else {
        elemIter.remove();
    }
    context.updateKeepWithPreviousPending(keepWithPrevious);
    context.setBreakBefore(breakBefore);

    //fox:widow-content-limit
    int widowContentLimit = getTableLM().getTable().getWidowContentLimit().getValue();
    if (widowContentLimit != 0 && bodyType == TableRowIterator.BODY) {
        ElementListUtils.removeLegalBreaks(returnList, widowContentLimit);
    }
    //fox:orphan-content-limit
    int orphanContentLimit = getTableLM().getTable().getOrphanContentLimit().getValue();
    if (orphanContentLimit != 0 && bodyType == TableRowIterator.BODY) {
        ElementListUtils.removeLegalBreaksFromEnd(returnList, orphanContentLimit);
    }

    return returnList;
}
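
Both tail inspections in the method above use the same idiom: open the iterator at returnList.size(), pull elements with previous(), and mutate through the iterator rather than by index. A reduced, standalone sketch of that idiom (dropLastUnless and its sentinel argument are illustrative, not part of FOP):

import java.util.LinkedList;
import java.util.ListIterator;

public class TailEditDemo {
    // Removes the last element unless it matches the given sentinel value.
    static void dropLastUnless(LinkedList<String> list, String sentinel) {
        if (list.isEmpty()) {
            return;
        }
        ListIterator<String> it = list.listIterator(list.size());
        String last = it.previous();          // inspect the tail element
        if (!sentinel.equals(last)) {
            it.remove();                      // mutate through the iterator
        }
    }
}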

From source file:org.apache.hadoop.hbase.regionserver.HLog.java

private static List<Path> splitLog(final Path rootDir, final FileStatus[] logfiles, final FileSystem fs,
        final HBaseConfiguration conf) throws IOException {
    final Map<byte[], WriterAndPath> logWriters = Collections
            .synchronizedMap(new TreeMap<byte[], WriterAndPath>(Bytes.BYTES_COMPARATOR));
    List<Path> splits = null;

    // Number of threads to use when log splitting to rewrite the logs.
    // More means faster but bigger mem consumption.
    int logWriterThreads = conf.getInt("hbase.regionserver.hlog.splitlog.writer.threads", 3);

    // Number of logs to read concurrently when log splitting.
    // More means faster but bigger mem consumption.
    int concurrentLogReads = conf.getInt("hbase.regionserver.hlog.splitlog.reader.threads", 3);
    // Is append supported?
    boolean append = isAppend(conf);
    try {
        int maxSteps = Double.valueOf(Math.ceil((logfiles.length * 1.0) / concurrentLogReads)).intValue();
        for (int step = 0; step < maxSteps; step++) {
            final Map<byte[], LinkedList<HLogEntry>> logEntries = new TreeMap<byte[], LinkedList<HLogEntry>>(
                    Bytes.BYTES_COMPARATOR);
            // Stop at logfiles.length when it's the last step
            int endIndex = step == maxSteps - 1 ? logfiles.length
                    : step * concurrentLogReads + concurrentLogReads;
            for (int i = (step * concurrentLogReads); i < endIndex; i++) {
                // Check for possibly empty file. With appends, currently Hadoop 
                // reports a zero length even if the file has been sync'd. Revisit if
                // HADOOP-4751 is committed.
                long length = logfiles[i].getLen();
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Splitting hlog " + (i + 1) + " of " + logfiles.length + ": "
                            + logfiles[i].getPath() + ", length=" + logfiles[i].getLen());
                }
                recoverLog(fs, logfiles[i].getPath(), append);
                SequenceFile.Reader in = null;
                int count = 0;
                try {
                    in = new SequenceFile.Reader(fs, logfiles[i].getPath(), conf);
                    try {
                        HLogKey key = newKey(conf);
                        KeyValue val = new KeyValue();
                        while (in.next(key, val)) {
                            byte[] regionName = key.getRegionName();
                            LinkedList<HLogEntry> queue = logEntries.get(regionName);
                            if (queue == null) {
                                queue = new LinkedList<HLogEntry>();
                                LOG.debug("Adding queue for " + Bytes.toStringBinary(regionName));
                                logEntries.put(regionName, queue);
                            }
                            HLogEntry hle = new HLogEntry(val, key);
                            queue.push(hle);
                            count++;
                            // Make the key and value new each time; otherwise same instance
                            // is used over and over.
                            key = newKey(conf);
                            val = new KeyValue();
                        }
                        LOG.debug("Pushed=" + count + " entries from " + logfiles[i].getPath());
                    } catch (IOException e) {
                        LOG.debug("IOE Pushed=" + count + " entries from " + logfiles[i].getPath());
                        e = RemoteExceptionHandler.checkIOException(e);
                        if (!(e instanceof EOFException)) {
                            LOG.warn("Exception processing " + logfiles[i].getPath()
                                    + " -- continuing. Possible DATA LOSS!", e);
                        }
                    }
                } catch (IOException e) {
                    if (length <= 0) {
                        LOG.warn("Empty hlog, continuing: " + logfiles[i] + " count=" + count, e);
                        continue;
                    }
                    throw e;
                } finally {
                    try {
                        if (in != null) {
                            in.close();
                        }
                    } catch (IOException e) {
                        LOG.warn("Close in finally threw exception -- continuing", e);
                    }
                    // Delete the input file now so we do not replay edits. We could
                    // have gotten here because of an exception. If so, probably
                    // nothing we can do about it. Replaying it, it could work but we
                    // could be stuck replaying for ever. Just continue though we
                    // could have lost some edits.
                    fs.delete(logfiles[i].getPath(), true);
                }
            }
            ExecutorService threadPool = Executors.newFixedThreadPool(logWriterThreads);
            for (final byte[] key : logEntries.keySet()) {
                Thread thread = new Thread(Bytes.toStringBinary(key)) {
                    @Override
                    public void run() {
                        LinkedList<HLogEntry> entries = logEntries.get(key);
                        LOG.debug("Thread got " + entries.size() + " to process");
                        long threadTime = System.currentTimeMillis();
                        try {
                            int count = 0;
                            // Items were added to the linkedlist oldest first. Pull them
                            // out in that order.
                            for (ListIterator<HLogEntry> i = entries.listIterator(entries.size()); i
                                    .hasPrevious();) {
                                HLogEntry logEntry = i.previous();
                                WriterAndPath wap = logWriters.get(key);
                                if (wap == null) {
                                    Path logfile = new Path(
                                            HRegion.getRegionDir(
                                                    HTableDescriptor.getTableDir(rootDir,
                                                            logEntry.getKey().getTablename()),
                                                    HRegionInfo.encodeRegionName(key)),
                                            HREGION_OLDLOGFILE_NAME);
                                    Path oldlogfile = null;
                                    SequenceFile.Reader old = null;
                                    if (fs.exists(logfile)) {
                                        FileStatus stat = fs.getFileStatus(logfile);
                                        if (stat.getLen() <= 0) {
                                            LOG.warn("Old hlog file " + logfile + " is zero "
                                                    + "length. Deleting existing file");
                                            fs.delete(logfile, false);
                                        } else {
                                            LOG.warn("Old hlog file " + logfile + " already "
                                                    + "exists. Copying existing file to new file");
                                            oldlogfile = new Path(logfile.toString() + ".old");
                                            fs.rename(logfile, oldlogfile);
                                            old = new SequenceFile.Reader(fs, oldlogfile, conf);
                                        }
                                    }
                                    SequenceFile.Writer w = SequenceFile.createWriter(fs, conf, logfile,
                                            getKeyClass(conf), KeyValue.class, getCompressionType(conf));
                                    wap = new WriterAndPath(logfile, w);
                                    logWriters.put(key, wap);
                                    if (LOG.isDebugEnabled()) {
                                        LOG.debug("Creating new hlog file writer for path " + logfile
                                                + " and region " + Bytes.toStringBinary(key));
                                    }

                                    if (old != null) {
                                        // Copy from existing log file
                                        HLogKey oldkey = newKey(conf);
                                        KeyValue oldval = new KeyValue();
                                        for (; old.next(oldkey, oldval); count++) {
                                            if (LOG.isDebugEnabled() && count > 0 && count % 10000 == 0) {
                                                LOG.debug("Copied " + count + " edits");
                                            }
                                            w.append(oldkey, oldval);
                                        }
                                        old.close();
                                        fs.delete(oldlogfile, true);
                                    }
                                }
                                wap.w.append(logEntry.getKey(), logEntry.getEdit());
                                count++;
                            }
                            if (LOG.isDebugEnabled()) {
                                LOG.debug("Applied " + count + " total edits to " + Bytes.toStringBinary(key)
                                        + " in " + (System.currentTimeMillis() - threadTime) + "ms");
                            }
                        } catch (IOException e) {
                            e = RemoteExceptionHandler.checkIOException(e);
                            LOG.warn("Got while writing region " + Bytes.toStringBinary(key) + " log " + e);
                            e.printStackTrace();
                        }
                    }
                };
                threadPool.execute(thread);
            }
            threadPool.shutdown();
            // Wait for all threads to terminate
            try {
                for (int i = 0; !threadPool.awaitTermination(5, TimeUnit.SECONDS); i++) {
                    LOG.debug("Waiting for hlog writers to terminate, iteration #" + i);
                }
            } catch (InterruptedException ex) {
                LOG.warn("Hlog writers were interrupted, possible data loss!");
            }
        }
    } finally {
        splits = new ArrayList<Path>(logWriters.size());
        for (WriterAndPath wap : logWriters.values()) {
            wap.w.close();
            LOG.debug("Closed " + wap.p);
            splits.add(wap.p);
        }
    }
    return splits;
}
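
Because LinkedList.push() prepends to the head, the per-region queues above end up newest-first; iterating from listIterator(entries.size()) back toward the front is what restores the original oldest-first order, as the comment in run() notes. A minimal demonstration:

import java.util.LinkedList;
import java.util.ListIterator;

public class PushOrderDemo {
    public static void main(String[] args) {
        LinkedList<Integer> entries = new LinkedList<Integer>();
        // push() prepends, so after these calls the list is [3, 2, 1].
        entries.push(1);
        entries.push(2);
        entries.push(3);

        // Iterating backwards from the end recovers insertion order.
        for (ListIterator<Integer> it = entries.listIterator(entries.size()); it.hasPrevious();) {
            System.out.println(it.previous()); // prints: 1, 2, 3
        }
    }
}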

From source file:org.gluu.site.ldap.persistence.LdifDataUtility.java

/**
 * Removes the base entry together with all of its subordinate entries.
 *
 * @param connection
 *            Connection to LDAP server
 * @param baseDN
 *            Base DN entry
 * @return The result code for the processing that was performed.
 */
public ResultCode deleteEntryWithAllSubs(LDAPConnection connection, String baseDN) {
    ResultCode resultCode = ResultCode.SUCCESS;
    SearchResult searchResult = null;
    try {
        searchResult = connection.search(baseDN, SearchScope.SUB, "objectClass=*");
        if ((searchResult == null) || (searchResult.getEntryCount() == 0)) {
            return ResultCode.LOCAL_ERROR;
        }
    } catch (LDAPSearchException le) {
        log.error("Failed to search subordinate entries", le);
        return ResultCode.LOCAL_ERROR;
    }

    LinkedList<String> dns = new LinkedList<String>();
    for (SearchResultEntry entry : searchResult.getSearchEntries()) {
        dns.add(entry.getDN());
    }

    ListIterator<String> listIterator = dns.listIterator(dns.size());
    while (listIterator.hasPrevious()) {
        try {
            connection.delete(listIterator.previous());
        } catch (LDAPException le) {
            log.error("Failed to delete entry", le);
            resultCode = ResultCode.LOCAL_ERROR;
            break;
        }
    }

    return resultCode;
}
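
The reverse iteration is what makes this deletion safe: a SUB-scope search returns parents before their children, and LDAP servers refuse to delete an entry that still has subordinates, so entries must be removed leaf-first. A standalone sketch of the ordering, with placeholder DNs and println standing in for the actual delete call:

import java.util.LinkedList;
import java.util.ListIterator;

public class LeafFirstDeleteDemo {
    public static void main(String[] args) {
        // A SUB-scope search lists parents before their children;
        // these DNs are placeholders for illustration.
        LinkedList<String> dns = new LinkedList<String>();
        dns.add("ou=people,dc=example,dc=org");
        dns.add("uid=alice,ou=people,dc=example,dc=org");
        dns.add("uid=bob,ou=people,dc=example,dc=org");

        // Reverse iteration visits leaf entries before their parents.
        ListIterator<String> it = dns.listIterator(dns.size());
        while (it.hasPrevious()) {
            System.out.println("delete " + it.previous());
        }
    }
}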

From source file:org.mbari.aved.ui.classifier.knowledgebase.SearchableConceptTreePanel.java

/**
 * Loads the branch of a particular concept. This method does the following
 * <ol>
 *      <li>Walks from the concept up the tree to the root concept, storing
 *      the concepts in a list. (This is very fast.)</li>
 *  <li>Starts walking from the root down (using lazyExpand), searching each
 *      child node for a matching primary name (which was stored in the first
 *      step).</li>
 *  <li>If a matching primary name is found, this stops; otherwise it opens
 *      the next level and searches for the next match in the list.</li>
 * </ol>
 * @param concept
 */
private void openNode(final Concept concept) {
    if (log.isDebugEnabled()) {
        log.debug("Opening node containing " + concept);
    }

    if (concept == null) {
        return;
    }

    // Get the list of concepts up to root
    final LinkedList conceptList = new LinkedList();
    Concept c = concept;

    while (c != null) {
        conceptList.add(c);
        c = (Concept) c.getParentConcept();
    }

    // Walk the tree from root on down opening nodes as we go
    final ListIterator i = conceptList.listIterator(conceptList.size());

    // Skip the root
    i.previous();

    final JTree tree = getJTree();
    final DefaultTreeModel treeModel = (DefaultTreeModel) tree.getModel();
    final DefaultMutableTreeNode rootNode = (DefaultMutableTreeNode) treeModel.getRoot();
    TreePath path = new TreePath(rootNode.getPath());

    tree.setSelectionPath(path);

    DefaultMutableTreeNode parentNode = rootNode;

    while (i.hasPrevious()) {
        c = (Concept) i.previous();

        final TreeConcept parentTreeConcept = (TreeConcept) parentNode.getUserObject();

        parentTreeConcept.lazyExpand(parentNode);

        // treeModel.reload(parentNode);
        final Enumeration enm = parentNode.children();

        while (enm.hasMoreElements()) {
            final DefaultMutableTreeNode node = (DefaultMutableTreeNode) enm.nextElement();
            final TreeConcept tc = (TreeConcept) node.getUserObject();

            if (tc.getName().equals(c.getPrimaryConceptNameAsString())) {
                parentNode = node;

                break;
            }
        }
    }

    final TreeNode _parentNode = parentNode;

    SwingUtilities.invokeLater(new Runnable() {
        public void run() {
            treeModel.reload(_parentNode);
            tree.scrollPathToVisible(new TreePath(_parentNode));
        }
    });
}
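
openNode() builds the list leaf-to-root and then replays it top-down through the backward iterator, consuming the root with a single previous() call before the loop. A self-contained sketch of the same technique, using a hypothetical Node type:

import java.util.LinkedList;
import java.util.ListIterator;

public class AncestorWalkDemo {
    // Hypothetical minimal node type, for illustration only.
    static class Node {
        final String name;
        final Node parent;
        Node(String name, Node parent) { this.name = name; this.parent = parent; }
    }

    public static void main(String[] args) {
        Node root = new Node("root", null);
        Node mid = new Node("mid", root);
        Node leaf = new Node("leaf", mid);

        // Walking up the parent chain yields a leaf-to-root list.
        LinkedList<Node> chain = new LinkedList<Node>();
        for (Node n = leaf; n != null; n = n.parent) {
            chain.add(n);
        }

        // Replay the chain top-down; one previous() call skips the root.
        ListIterator<Node> it = chain.listIterator(chain.size());
        it.previous(); // skip the root, as openNode() does above
        while (it.hasPrevious()) {
            System.out.println(it.previous().name); // prints: mid, leaf
        }
    }
}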