Example usage for java.util.LinkedList listIterator()

Introduction

This page shows example usage of the java.util.LinkedList method listIterator().

Prototype

ListIterator<E> listIterator();

Document

Returns a list iterator over the elements in this list (in proper sequence).
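
Before the project examples in the Usage section, here is a minimal self-contained sketch (class and element names are our own) of what listIterator() adds over iterator(): a bidirectional cursor plus in-place set() and add().

import java.util.LinkedList;
import java.util.ListIterator;

public class ListIteratorDemo {
    public static void main(String[] args) {
        LinkedList<String> list = new LinkedList<>();
        list.add("a");
        list.add("b");
        list.add("c");

        ListIterator<String> it = list.listIterator();
        while (it.hasNext()) {
            String value = it.next();
            if ("b".equals(value)) {
                it.set("B");  // replace the element just returned by next()
                it.add("b2"); // insert immediately after the replaced element
            }
        }
        System.out.println(list); // [a, B, b2, c]

        // the cursor is now at the end, so we can walk backwards
        while (it.hasPrevious()) {
            System.out.println(it.previous());
        }
    }
}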

Usage

From source file:org.apache.fop.layoutmgr.inline.FootnoteLayoutManager.java

/** {@inheritDoc} */
@Override
public void addAreas(PositionIterator posIter, LayoutContext context) {
    // "Unwrap" the NonLeafPositions stored in posIter and put
    // them in a new list, that will be given to the citationLM
    LinkedList<Position> positionList = new LinkedList<Position>();
    Position pos;
    while (posIter.hasNext()) {
        pos = posIter.next();
        if (pos != null && pos.getPosition() != null) {
            positionList.add(pos.getPosition());
        }
    }

    // FootnoteLM does not create any area,
    // so the citationLM child will add directly to the FootnoteLM parent area
    citationLM.setParent(getParent());

    // make the citationLM add its areas
    LayoutContext childContext = new LayoutContext(context);
    PositionIterator childPosIter = new PositionIterator(positionList.listIterator());
    LayoutManager childLM;
    while ((childLM = childPosIter.getNextChildLM()) != null) {
        childLM.addAreas(childPosIter, childContext);
        childContext.setLeadingSpace(childContext.getTrailingSpace());
        childContext.setFlags(LayoutContext.RESOLVE_LEADING_SPACE, true);
    }
}

From source file:org.apache.fop.layoutmgr.list.ListBlockLayoutManager.java

/**
 * A list block generates one or more normal block areas whose child areas are
 * normal block areas returned by the children of fo:list-block. See XSL-FO 1.1 6.8.2.
 *
 * @param parentIter the position iterator
 * @param layoutContext the layout context for adding areas
 */
@Override
public void addAreas(PositionIterator parentIter, LayoutContext layoutContext) {
    getParentArea(null);

    // if this will create the first block area in a page
    // and display-align is after or center, add space before
    if (layoutContext.getSpaceBefore() > 0) {
        addBlockSpacing(0.0, MinOptMax.getInstance(layoutContext.getSpaceBefore()));
    }

    addId();

    // the list block contains areas stacked from each list item

    LayoutManager childLM;
    LayoutContext lc = new LayoutContext(0);
    LayoutManager firstLM = null;
    LayoutManager lastLM = null;
    Position firstPos = null;
    Position lastPos = null;

    // "unwrap" the NonLeafPositions stored in parentIter
    // and put them in a new list;
    LinkedList<Position> positionList = new LinkedList<Position>();
    Position pos;
    while (parentIter.hasNext()) {
        pos = parentIter.next();
        if (pos.getIndex() >= 0) {
            if (firstPos == null) {
                firstPos = pos;
            }
            lastPos = pos;
        }
        if (pos instanceof NonLeafPosition && (pos.getPosition() != null)
                && pos.getPosition().getLM() != this) {
            // pos was created by a child of this ListBlockLM
            positionList.add(pos.getPosition());
            lastLM = pos.getPosition().getLM();
            if (firstLM == null) {
                firstLM = lastLM;
            }
        }
    }

    addMarkersToPage(true, isFirst(firstPos), isLast(lastPos));

    PositionIterator childPosIter = new PositionIterator(positionList.listIterator());
    while ((childLM = childPosIter.getNextChildLM()) != null) {
        // Add the block areas to Area
        // set the space adjustment ratio
        lc.setSpaceAdjust(layoutContext.getSpaceAdjust());
        lc.setFlags(LayoutContext.FIRST_AREA, childLM == firstLM);
        lc.setFlags(LayoutContext.LAST_AREA, childLM == lastLM);
        lc.setStackLimitBP(layoutContext.getStackLimitBP());
        childLM.addAreas(childPosIter, lc);
    }

    addMarkersToPage(false, isFirst(firstPos), isLast(lastPos));

    // We are done with this area; add the background
    TraitSetter.addBackground(curBlockArea, getListBlockFO().getCommonBorderPaddingBackground(), this);
    TraitSetter.addSpaceBeforeAfter(curBlockArea, layoutContext.getSpaceAdjust(), effSpaceBefore,
            effSpaceAfter);

    flush();

    curBlockArea = null;
    resetSpaces();

    checkEndOfLayout(lastPos);
}

From source file:org.apache.phoenix.execute.PhoenixTxnIndexMutationGenerator.java

private void processRollback(IndexMaintainer maintainer, byte[] txRollbackAttribute, ResultScanner scanner,
        Set<ColumnReference> mutableColumns, Collection<Mutation> indexUpdates,
        Map<ImmutableBytesPtr, MultiMutation> mutations, boolean replyWrite, final PTable table)
        throws IOException, SQLException {
    if (scanner != null) {
        Result result;
        // Loop through last committed row state plus all new rows associated with current transaction
        // to generate point delete markers for all index rows that were added. We don't have Tephra
        // manage index rows in change sets because we don't want the additional memory
        // overhead and do not need to do conflict detection on index rows.
        ColumnReference emptyColRef = new ColumnReference(maintainer.getDataEmptyKeyValueCF(),
                maintainer.getEmptyKeyValueQualifier());
        while ((result = scanner.next()) != null) {
            Mutation m = mutations.remove(new ImmutableBytesPtr(result.getRow()));
            // Sort by timestamp, type, cf, cq so we can process in time batches from oldest to newest
            // (as if we're "replaying" them in time order).
            List<Cell> cells = result.listCells();
            Collections.sort(cells, new Comparator<Cell>() {

                @Override
                public int compare(Cell o1, Cell o2) {
                    int c = Longs.compare(o1.getTimestamp(), o2.getTimestamp());
                    if (c != 0)
                        return c;
                    c = o1.getTypeByte() - o2.getTypeByte();
                    if (c != 0)
                        return c;
                    c = Bytes.compareTo(o1.getFamilyArray(), o1.getFamilyOffset(), o1.getFamilyLength(),
                            o2.getFamilyArray(), o2.getFamilyOffset(), o2.getFamilyLength());
                    if (c != 0)
                        return c;
                    return Bytes.compareTo(o1.getQualifierArray(), o1.getQualifierOffset(),
                            o1.getQualifierLength(), o2.getQualifierArray(), o2.getQualifierOffset(),
                            o2.getQualifierLength());
                }

            });
            int i = 0;
            int nCells = cells.size();
            Result oldResult = null, newResult;
            long readPtr = phoenixTransactionContext.getReadPointer();
            do {
                boolean hasPuts = false;
                LinkedList<Cell> singleTimeCells = Lists.newLinkedList();
                long writePtr;
                Cell cell = cells.get(i);
                do {
                    hasPuts |= cell.getTypeByte() == KeyValue.Type.Put.getCode();
                    writePtr = cell.getTimestamp();
                    ListIterator<Cell> it = singleTimeCells.listIterator();
                    do {
                        // Add at the beginning of the list to match the expected HBase
                        // newest to oldest sort order (which TxTableState relies on
                        // with the Result.getLatestColumnValue() calls). However, we
                        // still want to add Cells in the expected order for each time
                        // bound as otherwise we won't find it in our old state.
                        it.add(cell);
                    } while (++i < nCells && (cell = cells.get(i)).getTimestamp() == writePtr);
                } while (i < nCells && cell.getTimestamp() <= readPtr);

                // Generate point delete markers for the prior row deletion of the old index value.
                // The write timestamp is the next timestamp, not the current timestamp,
                // as the earliest cells are the current values for the row (and we don't
                // want to delete the current row).
                if (oldResult != null) {
                    TxTableState state = new TxTableState(mutableColumns, writePtr, m, emptyColRef, oldResult);
                    generateDeletes(indexUpdates, txRollbackAttribute, state, maintainer, replyWrite, table);
                }
                // Generate point delete markers for the new index value.
                // If our time batch doesn't have Puts (i.e. we have only Deletes), then do not
                // generate deletes. We would have generated the delete above based on the state
                // of the previous row. The delete markers do not give us the state we need to
                // delete.
                if (hasPuts) {
                    newResult = Result.create(singleTimeCells);
                    // First row may represent the current state which we don't want to delete
                    if (writePtr > readPtr) {
                        TxTableState state = new TxTableState(mutableColumns, writePtr, m, emptyColRef,
                                newResult);
                        generateDeletes(indexUpdates, txRollbackAttribute, state, maintainer, replyWrite,
                                table);
                    }
                    oldResult = newResult;
                } else {
                    oldResult = null;
                }
            } while (i < nCells);
        }
    }
}

From source file:org.apache.phoenix.index.PhoenixTransactionalIndexer.java

private void processRollback(RegionCoprocessorEnvironment env, PhoenixIndexMetaData indexMetaData,
        byte[] txRollbackAttribute, ResultScanner scanner, Transaction tx, Set<ColumnReference> mutableColumns,
        Collection<Pair<Mutation, byte[]>> indexUpdates, Map<ImmutableBytesPtr, MultiMutation> mutations)
        throws IOException {
    if (scanner != null) {
        Result result;
        // Loop through last committed row state plus all new rows associated with current transaction
        // to generate point delete markers for all index rows that were added. We don't have Tephra
        // manage index rows in change sets because we don't want the additional memory
        // overhead and do not need to do conflict detection on index rows.
        ColumnReference emptyColRef = new ColumnReference(
                indexMetaData.getIndexMaintainers().get(0).getDataEmptyKeyValueCF(),
                indexMetaData.getIndexMaintainers().get(0).getEmptyKeyValueQualifier());
        while ((result = scanner.next()) != null) {
            Mutation m = mutations.remove(new ImmutableBytesPtr(result.getRow()));
            // Sort by timestamp, type, cf, cq so we can process in time batches from oldest to newest
            // (as if we're "replaying" them in time order).
            List<Cell> cells = result.listCells();
            Collections.sort(cells, new Comparator<Cell>() {

                @Override
                public int compare(Cell o1, Cell o2) {
                    int c = Longs.compare(o1.getTimestamp(), o2.getTimestamp());
                    if (c != 0)
                        return c;
                    c = o1.getTypeByte() - o2.getTypeByte();
                    if (c != 0)
                        return c;
                    c = Bytes.compareTo(o1.getFamilyArray(), o1.getFamilyOffset(), o1.getFamilyLength(),
                            o2.getFamilyArray(), o2.getFamilyOffset(), o2.getFamilyLength());
                    if (c != 0)
                        return c;
                    return Bytes.compareTo(o1.getQualifierArray(), o1.getQualifierOffset(),
                            o1.getQualifierLength(), o2.getQualifierArray(), o2.getQualifierOffset(),
                            o2.getQualifierLength());
                }

            });
            int i = 0;
            int nCells = cells.size();
            Result oldResult = null, newResult;
            long readPtr = tx.getReadPointer();
            do {
                boolean hasPuts = false;
                LinkedList<Cell> singleTimeCells = Lists.newLinkedList();
                long writePtr;
                Cell cell = cells.get(i);
                do {
                    hasPuts |= cell.getTypeByte() == KeyValue.Type.Put.getCode();
                    writePtr = cell.getTimestamp();
                    ListIterator<Cell> it = singleTimeCells.listIterator();
                    do {
                        // Add at the beginning of the list to match the expected HBase
                        // newest to oldest sort order (which TxTableState relies on
                        // with the Result.getLatestColumnValue() calls). However, we
                        // still want to add Cells in the expected order for each time
                        // bound as otherwise we won't find it in our old state.
                        it.add(cell);
                    } while (++i < nCells && (cell = cells.get(i)).getTimestamp() == writePtr);
                } while (i < nCells && cell.getTimestamp() <= readPtr);

                // Generate point delete markers for the prior row deletion of the old index value.
                // The write timestamp is the next timestamp, not the current timestamp,
                // as the earliest cells are the current values for the row (and we don't
                // want to delete the current row).
                if (oldResult != null) {
                    TxTableState state = new TxTableState(env, mutableColumns, indexMetaData.getAttributes(),
                            writePtr, m, emptyColRef, oldResult);
                    generateDeletes(indexMetaData, indexUpdates, txRollbackAttribute, state);
                }
                // Generate point delete markers for the new index value.
                // If our time batch doesn't have Puts (i.e. we have only Deletes), then do not
                // generate deletes. We would have generated the delete above based on the state
                // of the previous row. The delete markers do not give us the state we need to
                // delete.
                if (hasPuts) {
                    newResult = Result.create(singleTimeCells);
                    // First row may represent the current state which we don't want to delete
                    if (writePtr > readPtr) {
                        TxTableState state = new TxTableState(env, mutableColumns,
                                indexMetaData.getAttributes(), writePtr, m, emptyColRef, newResult);
                        generateDeletes(indexMetaData, indexUpdates, txRollbackAttribute, state);
                    }
                    oldResult = newResult;
                } else {
                    oldResult = null;
                }
            } while (i < nCells);
        }
    }
}
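
Both Phoenix examples above rely on the same idiom: a fresh listIterator() starts at the head of the list, and repeated add() calls insert a whole timestamp batch in front of everything added by earlier batches, while preserving the order within the batch. A minimal sketch of just that idiom (the data here is hypothetical):

import java.util.LinkedList;
import java.util.List;
import java.util.ListIterator;

public class FrontInsertDemo {
    public static void main(String[] args) {
        LinkedList<String> result = new LinkedList<>();
        List<List<String>> batchesOldestFirst = List.of(
                List.of("t1-a", "t1-b"),
                List.of("t2-a", "t2-b"));
        for (List<String> batch : batchesOldestFirst) {
            // a new list iterator starts before the current head
            ListIterator<String> it = result.listIterator();
            for (String cell : batch) {
                it.add(cell); // in-batch order is kept; the batch lands at the front
            }
        }
        System.out.println(result); // [t2-a, t2-b, t1-a, t1-b]
    }
}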

From source file:org.hippoecm.hst.demo.components.NonWorkflowWikiImporterComponent.java

/**
 * Relates the nodes to the previous nodes (in order of UUID)
 *
 * @param request
 * @param response
 */
private void relateDocuments(HstRequest request, HstResponse response, Operation op, final int relations,
        String orderByProperty) {
    if (relations < 1) {
        return;
    }

    try {
        Session writableSession = this.getPersistableSession(request);
        Node wikipedia = writableSession
                .getNode("/" + request.getRequestContext().getSiteContentBasePath() + "/wikipedia");
        @SuppressWarnings("deprecation")
        Query q = writableSession.getWorkspace().getQueryManager()
                .createQuery("//element(*,demosite:wikidocument)[@hippo:paths='" + wikipedia.getIdentifier()
                        + "'] order by @jcr:uuid", Query.XPATH);
        QueryResult result = q.execute();
        NodeIterator it = result.getNodes();

        // Fill first queue with elements, which can't be fully linked yet
        Node current;
        LinkedList<Node> firsts = new LinkedList<Node>();
        LinkedList<Node> previous = new LinkedList<Node>();
        while (it.hasNext() && firsts.size() != relations) {
            current = it.nextNode();
            firsts.add(current);
            previous.add(current);
        }

        // Link to previous documents, update previous documents queue, occasionally save
        int count = 1;
        while (it.hasNext()) {
            current = it.nextNode();
            Iterator<Node> qit = previous.listIterator();

            while (qit.hasNext()) {
                op.perform(current, qit.next());
            }

            previous.remove();
            previous.add(current);

            if (count++ % 200 == 0) {
                writableSession.save();
            }
        }

        // Finally, link the first queue with elements
        Iterator<Node> fit = firsts.listIterator();
        while (fit.hasNext()) {
            current = fit.next();
            Iterator<Node> qit = previous.listIterator();

            while (qit.hasNext()) {
                op.perform(current, qit.next());
            }

            previous.remove();
            previous.add(current);
        }

        writableSession.save();
    } catch (RepositoryException e) {
        log.warn("Exception during relating wiki docs", e);
        response.setRenderParameter("message",
                "An exception happened. Did not relate wiki docs. " + e.toString());
    }
}
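
The example above uses listIterator() only for read access; the interesting part is the sliding window: the previous queue always holds the last few nodes, each new node is related to every node in the window, and the window then advances with remove() and add(). A stripped-down sketch of that pattern, with hypothetical data:

import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;

public class SlidingWindowDemo {
    public static void main(String[] args) {
        // window of the two most recently seen documents
        LinkedList<String> previous = new LinkedList<>(List.of("doc1", "doc2"));
        for (String current : List.of("doc3", "doc4")) {
            Iterator<String> qit = previous.listIterator();
            while (qit.hasNext()) {
                System.out.println(current + " -> " + qit.next());
            }
            previous.remove();     // drop the oldest entry (the head)
            previous.add(current); // append the newest at the tail
        }
    }
}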

From source file:org.matonto.catalog.impl.SimpleCatalogManager.java

@Override
public Set<Conflict> getConflicts(Resource leftId, Resource rightId) throws MatOntoException {
    if (resourceExists(leftId, Commit.TYPE) && resourceExists(rightId, Commit.TYPE)) {
        try (RepositoryConnection conn = repository.getConnection()) {
            LinkedList<Value> leftList = new LinkedList<>();
            LinkedList<Value> rightList = new LinkedList<>();

            getCommitChainIterator(leftId, conn).forEachRemaining(leftList::add);
            getCommitChainIterator(rightId, conn).forEachRemaining(rightList::add);

            ListIterator<Value> leftIterator = leftList.listIterator();
            ListIterator<Value> rightIterator = rightList.listIterator();

            Value originalEnd = null;
            while (leftIterator.hasNext() && rightIterator.hasNext()) {
                Value currentId = leftIterator.next();
                if (!currentId.equals(rightIterator.next())) {
                    leftIterator.previous();
                    rightIterator.previous();
                    break;
                } else {
                    originalEnd = currentId;
                }
            }
            if (originalEnd == null) {
                throw new MatOntoException("There is no common parent between the provided Commits.");
            }

            Model left = createModelFromIterator(leftIterator, conn);
            Model right = createModelFromIterator(rightIterator, conn);

            Model duplicates = mf.createModel(left);
            duplicates.retainAll(right);

            left.removeAll(duplicates);
            right.removeAll(duplicates);

            Resource deletionContext = vf.createIRI(DELETION_CONTEXT);

            Model leftDeletions = mf.createModel(left.filter(null, null, null, deletionContext));
            Model rightDeletions = mf.createModel(right.filter(null, null, null, deletionContext));

            left.removeAll(leftDeletions);
            right.removeAll(rightDeletions);

            Set<Conflict> result = new HashSet<>();

            Model original = getCompiledResource((Resource) originalEnd).get();
            IRI rdfType = vf.createIRI(RDF.TYPE.stringValue());

            leftDeletions.forEach(statement -> {
                Resource subject = statement.getSubject();
                IRI predicate = statement.getPredicate();
                if (predicate.equals(rdfType) || right.contains(subject, predicate, null)) {
                    result.add(createConflict(subject, predicate, original, left, leftDeletions, right,
                            rightDeletions));
                    Stream.of(left, right, rightDeletions)
                            .forEach(item -> item.remove(subject, predicate, null));
                }
            });

            rightDeletions.forEach(statement -> {
                Resource subject = statement.getSubject();
                IRI predicate = statement.getPredicate();
                if (predicate.equals(rdfType) || left.contains(subject, predicate, null)) {
                    result.add(createConflict(subject, predicate, original, left, leftDeletions, right,
                            rightDeletions));
                    Stream.of(left, leftDeletions, right)
                            .forEach(item -> item.remove(subject, predicate, null));
                }
            });

            left.forEach(statement -> {
                Resource subject = statement.getSubject();
                IRI predicate = statement.getPredicate();
                if (right.contains(subject, predicate, null)) {
                    result.add(createConflict(subject, predicate, original, left, leftDeletions, right,
                            rightDeletions));
                    Stream.of(leftDeletions, right, rightDeletions)
                            .forEach(item -> item.remove(subject, predicate, null));
                }
            });

            return result;
        } catch (RepositoryException e) {
            throw new MatOntoException("Error in repository connection.", e);
        }
    }
    throw new MatOntoException("One or both of the commit IRIs could not be found in the Repository.");
}
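
The two list iterators above walk the left and right commit chains in parallel; when the chains diverge, previous() steps each cursor back one position, so the remaining elements of each iterator are exactly the commits unique to that side. A reduced sketch of that scan, with hypothetical commit ids:

import java.util.LinkedList;
import java.util.List;
import java.util.ListIterator;

public class CommonParentDemo {
    public static void main(String[] args) {
        LinkedList<String> left = new LinkedList<>(List.of("c1", "c2", "c3L"));
        LinkedList<String> right = new LinkedList<>(List.of("c1", "c2", "c3R", "c4R"));

        ListIterator<String> leftIterator = left.listIterator();
        ListIterator<String> rightIterator = right.listIterator();
        String commonParent = null;
        while (leftIterator.hasNext() && rightIterator.hasNext()) {
            String currentId = leftIterator.next();
            if (!currentId.equals(rightIterator.next())) {
                // step back so both iterators point at the first divergent commit
                leftIterator.previous();
                rightIterator.previous();
                break;
            }
            commonParent = currentId;
        }
        System.out.println(commonParent);          // c2
        System.out.println(leftIterator.next());   // c3L
        System.out.println(rightIterator.next());  // c3R
    }
}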

From source file:org.openhab.binding.canopen.internal.CANOpenBinding.java

public void bindingChanged(BindingProvider provider, String itemName) {
    super.bindingChanged(provider, itemName);

    // register as listener!

    if (!((CANOpenBindingProvider) provider).providesBindingFor(itemName)) { // Item was removed
        //         logger.debug("removing item " + itemName);
        // TODO provide for removal of unused sockets
        // remove PDO
        Integer pdoId = itemPDOMap.get(itemName);
        if (pdoId != null) {
            LinkedList<CANOpenItemConfig> pdoList = pdoConfigMap.get(pdoId);
            if (pdoList != null) {
                ListIterator<CANOpenItemConfig> iterator = pdoList.listIterator();
                while (iterator.hasNext()) {
                    if (itemName.equals(iterator.next().getItemName()))
                        iterator.remove();
                }
            }
            itemPDOMap.remove(itemName);
        }
        // remove NMT
        Iterator<CANOpenItemConfig> configsIterator = nmtConfigMap.values().iterator();
        while (configsIterator.hasNext()) {
            if (itemName.equals(configsIterator.next().getItemName()))
                configsIterator.remove();
        }

        // remove SDOs
        for (SDODeviceManager manager : sdoDeviceManagerMap.values()) {
            if (manager.removeItemName(itemName))
                break;
        }

    } else {
        CANOpenItemConfig itemConfig = ((CANOpenBindingProvider) provider).getItemConfig(itemName);
        ISocketConnection conn = null;
        try {
            conn = CANOpenActivator.getConnection(itemConfig.getCanInterfaceId());
            conn.addMessageReceivedListener(this);
            conn.open();
        } catch (Exception e) {
            logger.error(
                    "Error adding listener to or opening socket " + itemConfig.getCanInterfaceId() + ": " + e);
        }

        if (conn != null) {
            initializeItem(conn, itemConfig);
        }

        // add PDO
        if (itemConfig.providesTxPDO()) {
            LinkedList<CANOpenItemConfig> pdoList = pdoConfigMap.get(itemConfig.getPDOId());
            if (pdoList == null) {
                pdoList = new LinkedList<CANOpenItemConfig>();
                pdoConfigMap.put(itemConfig.getPDOId(), pdoList);
            }
            pdoList.add(itemConfig);
            itemPDOMap.put(itemName, itemConfig.getPDOId());
        }

        // add NMT
        if (itemConfig.providesNMT()) {
            nmtConfigMap.put(itemConfig.getDeviceID(), itemConfig);
        }

        // add SDO
        if (itemConfig.providesSDO()) {
            SDODeviceManager manager = sdoDeviceManagerMap.get(itemConfig.getDeviceID());
            if (manager == null) {
                manager = new SDODeviceManager(this, sdoResponseTimeout);
                sdoDeviceManagerMap.put(itemConfig.getDeviceID(), manager);
            }
            manager.add(itemConfig);
        }

        logger.debug("added item config " + itemConfig);
    }
}
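
The PDO cleanup above shows the one thing a plain for-each loop cannot do: remove elements from a LinkedList while traversing it. ListIterator.remove() deletes the element most recently returned by next() without breaking the iteration. A minimal sketch with hypothetical item names:

import java.util.LinkedList;
import java.util.List;
import java.util.ListIterator;

public class RemoveWhileIteratingDemo {
    public static void main(String[] args) {
        LinkedList<String> items = new LinkedList<>(List.of("lamp1", "lamp2", "switch1"));
        ListIterator<String> iterator = items.listIterator();
        while (iterator.hasNext()) {
            if (iterator.next().startsWith("lamp")) {
                iterator.remove(); // safe structural removal during iteration
            }
        }
        System.out.println(items); // [switch1]
    }
}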

From source file:org.openhab.binding.canopen.internal.CANOpenBinding.java

@Override
public void messageReceived(int canID, boolean rtr, byte[] data, ISocketConnection canInterface) {
    // logger.debug("Message received: = " + Util.canMessageToSting(canID, data));
    if (!rtr) {
        // Process NMT
        if ((canID & ~0x7F) == 0x700) { // NMT error control (bootup and heart beat)
            int deviceID = canID & 0x7F;
            if (autoStartAll || autoStartNodes.contains(Integer.valueOf(deviceID)))
                if (data.length > 0 && data[0] != 5) {
                    byte[] msg = new byte[2];
                    msg[0] = 1; // command specifier: start remote node
                    msg[1] = (byte) (deviceID); // node id
                    canInterface.send(new CanId(0), msg);
                }

            CANOpenItemConfig config = nmtConfigMap.get(deviceID);
            if (config != null) {
                String itemName = config.getItemName();
                State s = config.nmtToState(data);
                addIgnoreEvent(itemName, s);
                eventPublisher.postUpdate(itemName, s);
            }

            return;
        }
        // Process SDO
        if ((canID & ~0x7F) == 0x580) {
            if (data == null || data.length != 8) {
                logger.error("Received SDO message with less than 8 data bytes: "
                        + Util.canMessageToSting(canID, data));
                return;
            }

            int deviceID = canID & 0x7F;
            SDODeviceManager manager = sdoDeviceManagerMap.get(deviceID);
            if (manager != null) {
                manager.messageReceived(canID, rtr, data, canInterface);
            } else
                logger.warn("Couldn't find SDO handler for device " + deviceID);

            return;
        } else {
            // process PDO
            LinkedList<CANOpenItemConfig> pdoList = pdoConfigMap.get(canID);
            if (pdoList != null) {
                ListIterator<CANOpenItemConfig> iterator = pdoList.listIterator();
                while (iterator.hasNext()) {
                    CANOpenItemConfig config = iterator.next();
                    String itemName = config.getItemName();
                    State s = config.pdoToState(data);
                    addIgnoreEvent(itemName, s);
                    eventPublisher.postUpdate(itemName, s);
                }
            }
        }
    }
}

From source file:org.squashtest.tm.service.importer.ImportLog.java

/**
 *
 * <p>The logs for the datasets also contain the logs for the dataset parameter values.
 * Since they were inserted separately, we need to purge redundant information from them.</p>
 *
 * <p>To ensure consistency we need to check that, for each imported line, a log entry
 *   with status OK may remain only if it is the sole log entry for that line.
 *   From a procedural point of view we need, for each imported line, to remove a log entry
 *   if it has status OK and:
 * <ul>
 *    <li>there was already a status OK for that line, or</li>
 *    <li>there is at least 1 warning or error</li>
 * </ul>
 * </p>
 *
 */

/*
 * NB : This code relies on the fact that the log entries are sorted by import line number then by status,
 * and that the status OK comes first.
 *
 * Basically the job boils down to the following rule:
 *
 * for each line, for each entry, if the previous entry on the same line had status OK, remove that previous entry.
 *
 */
public void packLogs() {

    LinkedList<LogEntry> listiterableLogs = new LinkedList<>(findAllFor(DATASET));

    Integer precLine = null;
    boolean okFoundOnPrecEntry = false;

    ListIterator<LogEntry> iter = listiterableLogs.listIterator();

    while (iter.hasNext()) {

        LogEntry entry = iter.next();
        Integer curLine = entry.getLine();
        ImportStatus curStatus = entry.getStatus();

        /*
         * if the previous entry had status OK and the current
         * entry concerns the same line, remove that previous entry.
         */
        if (okFoundOnPrecEntry && curLine.equals(precLine)) {

            // finding the previous element actually means
            // to backtrack twice (because the cursor points
            // to the next element already)

            iter.previous();
            iter.previous();

            iter.remove();

            // now we replace the cursor where it was before
            // the 'if'.
            iter.next();

        }

        // now we set our flag according to the status of the
        // current entry and update precLine
        okFoundOnPrecEntry = curStatus == OK;
        precLine = curLine;
    }

    // once complete we replace the original list with the filtered one
    findAllFor(DATASET).clear();
    logEntriesPerType.putAll(DATASET, listiterableLogs);

}
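
The backtracking trick used above deserves a standalone illustration: after next() has returned the current entry, two previous() calls move the cursor back over the current entry and the one before it, remove() drops that earlier entry, and a final next() puts the cursor back past the current entry. A sketch with our own data:

import java.util.LinkedList;
import java.util.List;
import java.util.ListIterator;

public class RemovePreviousDemo {
    public static void main(String[] args) {
        LinkedList<String> entries = new LinkedList<>(List.of("OK-line1", "WARN-line1", "OK-line2"));
        ListIterator<String> iter = entries.listIterator();
        while (iter.hasNext()) {
            String current = iter.next();
            if (current.startsWith("WARN")) {
                iter.previous(); // back over the current entry
                iter.previous(); // back over the entry before it
                iter.remove();   // drop that earlier entry
                iter.next();     // cursor is again just past the current entry
            }
        }
        System.out.println(entries); // [WARN-line1, OK-line2]
    }
}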

From source file:org.squashtest.tm.service.internal.repository.hibernate.TestCaseDaoImpl.java

/**
 * @param userSorting the sorting requested by the user
 * @return the effective list of sortings, with the user's sorting first
 */
/*
 * Issue #1629
 *
 * Observed problem : test cases sorted by reference are indeed sorted by reference, but no longer by name. Actual
 * problem : we always want them to be sorted by reference and name, even when we primarily want to sort them by
 * project, execution type or something else. Solution : the resultset will be sorted on all the attributes (ascending), and
 * the Sorting specified by the user will have a higher priority.
 *
 * See #createEffectiveSorting(Sorting sorting), just below
 */

private List<Sorting> createEffectiveSorting(Sorting userSorting) {

    LinkedList<Sorting> sortings = new LinkedList<Sorting>(defaultVerifiedTcSorting);

    // from that list we filter out the redundant element, considering the argument.
    // note that the sorting order is irrelevant here.
    ListIterator<Sorting> iterator = sortings.listIterator();
    while (iterator.hasNext()) {
        Sorting defaultSorting = iterator.next();
        if (defaultSorting.getSortedAttribute().equals(userSorting.getSortedAttribute())) {
            iterator.remove();
            break;
        }
    }

    // now we can set the Sorting specified by the user in first position
    sortings.addFirst(userSorting);

    return sortings;
}
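
The pattern in this last example is dedup-then-prioritize: scan the default sortings with a list iterator, remove the one the user's sorting would duplicate, then push the user's sorting to the front with addFirst(). A compact sketch with hypothetical attribute names:

import java.util.LinkedList;
import java.util.List;
import java.util.ListIterator;

public class EffectiveSortingDemo {
    public static void main(String[] args) {
        LinkedList<String> sortings = new LinkedList<>(List.of("reference", "name", "project"));
        String userSorting = "name";

        ListIterator<String> iterator = sortings.listIterator();
        while (iterator.hasNext()) {
            if (iterator.next().equals(userSorting)) {
                iterator.remove(); // drop the default entry the user's sorting replaces
                break;
            }
        }
        sortings.addFirst(userSorting); // the user's criterion now has the highest priority
        System.out.println(sortings); // [name, reference, project]
    }
}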