Example usage for java.util ListIterator previous

List of usage examples for java.util ListIterator previous

Introduction

On this page you can find example usages of java.util.ListIterator.previous().

Prototype

E previous();

Source Link

Document

Returns the previous element in the list and moves the cursor position backwards.

Usage

From source file:org.eclipse.jubula.rc.common.driver.KeyTyper.java

/**
 * Releases, in reverse order, every key that was pressed but has not yet
 * been released.
 *
 * @param options options passed to the interceptor
 * @param alreadyDown the set of keycodes that are currently pressed
 * @param i iterator over the pressed keycodes, traversed backwards
 * @param interceptor The interceptor that will be used to wait for event
 *                    confirmation.
 * @param keyUpMatcher The event matcher to be used for key release event
 *                     confirmation.
 */
private void releaseKeys(InterceptorOptions options, Set alreadyDown, ListIterator i,
        IRobotEventInterceptor interceptor, IEventMatcher keyUpMatcher) {

    final boolean confirmEvents = (interceptor != null) && (keyUpMatcher != null);
    // Keys released so far; guards against releasing the same key twice.
    final Set released = new HashSet();
    // Walk the iterator backwards so keys come up in reverse press order.
    while (i.hasPrevious()) {
        final Integer keycode = (Integer) i.previous();
        if (log.isDebugEnabled()) {
            log.debug("trying to release: " + keycode.intValue()); //$NON-NLS-1$
        }
        final boolean pending = alreadyDown.contains(keycode) && !released.contains(keycode);
        if (!pending) {
            continue;
        }
        try {
            final IRobotEventConfirmer confirmer = confirmEvents ? interceptor.intercept(options) : null;
            if (log.isDebugEnabled()) {
                log.debug("releasing: " + keycode.intValue()); //$NON-NLS-1$
            }
            released.add(keycode);
            m_robot.keyRelease(keycode.intValue());
            if (confirmEvents) {
                confirmer.waitToConfirm(null, keyUpMatcher);
            }
        } catch (RobotException e) {
            log.error("error releasing keys", e); //$NON-NLS-1$
            // Only propagate once there is nothing left to release, so the
            // remaining keys still get a release attempt.
            if (!i.hasPrevious()) {
                throw e;
            }
        }
    }
}

From source file:org.apache.cayenne.access.DbGenerator.java

/**
 * Returns a list of all schema statements that should be executed with the
 * current configuration./*from   ww  w  . j ava  2s  . c o  m*/
 */
public List<String> configuredStatements() {
    List<String> list = new ArrayList<>();

    if (shouldDropTables) {
        ListIterator<DbEntity> it = dbEntitiesInInsertOrder.listIterator(dbEntitiesInInsertOrder.size());
        while (it.hasPrevious()) {
            DbEntity ent = it.previous();
            list.addAll(dropTables.get(ent.getName()));
        }
    }

    if (shouldCreateTables) {
        for (final DbEntity ent : dbEntitiesInInsertOrder) {
            list.add(createTables.get(ent.getName()));
        }
    }

    if (shouldCreateFKConstraints) {
        for (final DbEntity ent : dbEntitiesInInsertOrder) {
            List<String> fks = createConstraints.get(ent.getName());
            list.addAll(fks);
        }
    }

    if (shouldDropPKSupport) {
        list.addAll(dropPK);
    }

    if (shouldCreatePKSupport) {
        list.addAll(createPK);
    }

    return list;
}

From source file:org.openconcerto.openoffice.ODSingleXMLDocument.java

/**
 * Scans {@code content} backwards from {@code contentSize} to locate the
 * trailing run of children whose part is unknown (i.e. {@code getPart()}
 * returned null), stopping as soon as a non-epilogue part is met.
 *
 * @param parts maps (namespace, element name) to its {@link ContentPart}
 * @param content the children of the element being examined
 * @param contentSize the index to start scanning backwards from
 * @return {@code { lastNullsStart, lastNullsEnd }} — NOTE(review): reads as
 *         start index and exclusive end index of the trailing null-part run
 *         (equal when there is none); confirm against callers
 */
static private int[] getLastNulls(final Map<Tuple2<Namespace, String>, ContentPart> parts,
        final List<Content> content, final int contentSize) {
    // start from the end until we leave the epilogue (quicker than traversing the main part as
    // prologue and epilogue sizes are bounded and small)
    ContentPart contentPart = null; // part of the most recently examined child
    final ListIterator<Content> thisChildrenIter = content.listIterator(contentSize);
    // Index of the highest-index null-part child of the current run (first
    // one met going backwards); -1 while no run is open.
    int nullsStartIndex = -1;
    while ((contentPart == null || contentPart == ContentPart.EPILOGUE) && thisChildrenIter.hasPrevious()) {
        contentPart = getPart(parts, thisChildrenIter.previous());
        if (contentPart != null) {
            // A recognised part breaks the run of nulls seen so far.
            nullsStartIndex = -1;
        } else if (nullsStartIndex < 0) {
            // nextIndex() is the index of the child just returned by previous().
            nullsStartIndex = thisChildrenIter.nextIndex();
        }
    }
    // If the loop ended while still in the epilogue (or ran out of children),
    // the cursor already sits at the boundary; otherwise we overshot by one
    // non-epilogue child and must add it back.
    final int lastNullsStart = contentPart == null || contentPart == ContentPart.EPILOGUE
            ? thisChildrenIter.nextIndex()
            : thisChildrenIter.nextIndex() + 1;
    final int lastNullsEnd = nullsStartIndex < 0 ? lastNullsStart : nullsStartIndex + 1;
    return new int[] { lastNullsStart, lastNullsEnd };
}

From source file:uk.ac.ucl.excites.sapelli.storage.eximport.csv.CSVRecordsImporter.java

/**
 * Parses the CSV header row: determines the value separator from the hint
 * character at the end of the row, consumes trailing "key=value" attribute
 * headers (model ID, model schema number, schema name, export timestamp),
 * resolves the schema to use, and finally turns the remaining headers into
 * column pointers.
 *
 * @param row the raw header row text
 * @throws IllegalArgumentException when the header row is empty
 * @throws Exception when no (readable) schema information is present and no
 *         fallback schema is available
 */
private void parseHeaderRow(String row) throws Exception {
    // Check row length (fixed: message used to claim a *null* check):
    if (row.isEmpty())
        throw new IllegalArgumentException("Header row cannot be empty");

    // Get separator by reading last char of the header:
    try {
        separator = Separator.getSeparator(row.charAt(row.length() - 1));
    } catch (IllegalArgumentException iae) {
        separator = CSVRecordsExporter.DEFAULT_SEPARATOR;
        addWarning("Header row does not contain separator hint, trying to parse file using default separator ("
                + separator.toString() + ").");
    }

    // Split header row:
    List<String> headers = splitRow(row);

    // Parse attribute headers:
    Long modelID = null;
    Integer modelSchemaNo = null;
    String schemaName = null;
    try {
        ListIterator<String> headerIter = headers.listIterator(headers.size());
        String attributeHeader;
        int equalsPos;
        // Iterate over headers back to front until we hit one without '=':
        while (headerIter.hasPrevious()
                && (equalsPos = (attributeHeader = headerIter.previous()).indexOf('=')) != -1) {
            switch (attributeHeader.substring(0, equalsPos)) {
            case Schema.ATTRIBUTE_MODEL_ID:
                modelID = Long.valueOf(attributeHeader.substring(equalsPos + 1));
                break;
            case Schema.ATTRIBUTE_MODEL_SCHEMA_NUMBER:
                modelSchemaNo = Integer.valueOf(attributeHeader.substring(equalsPos + 1));
                break;
            case Schema.ATTRIBUTE_SCHEMA_NAME:
                schemaName = deescapeAndUnquote(attributeHeader.substring(equalsPos + 1));
                break;
            case Exporter.ATTRIBUTE_EXPORTED_AT:
                try {
                    exportedAt = new TimeStamp(Exporter.ExportedAtFormatter.withOffsetParsed()
                            .parseDateTime(attributeHeader.substring(equalsPos + 1)));
                } catch (Exception e) {
                    addWarning(
                            "Error upon parsing exportedAt time: " + attributeHeader.substring(equalsPos + 1));
                }
                break;
            default:
                addWarning("Ignored unrecognised attribute header: " + attributeHeader);
            }
            // Remove attribute header so only column headers remain:
            headerIter.remove();
        }
    } catch (Exception e) {
        // Attribute headers are best-effort; log instead of failing the import.
        client.logWarning("Error upon parsing CSV attribute header: " + ExceptionHelpers.getMessageAndCause(e));
    }

    // Get schema:
    if (modelID != null && modelSchemaNo != null) {
        Schema headerSchema = null;
        try {
            headerSchema = client.getSchema(modelID, modelSchemaNo, schemaName);
        } catch (Exception e) {
            if (schema != null)
                addWarning("Could not find schema: " + e.getMessage() + ". Using fallback schema ("
                        + schema.toString() + ").");
            else
                throw e;
        }
        // Guard against a failed lookup (headerSchema == null): previously this
        // caused an NPE on headerSchema.equals() and then overwrote the
        // fallback schema with null, defeating the fallback path above.
        if (headerSchema != null) {
            if (schema != null && !headerSchema.equals(schema))
                addWarning("CSV schema (" + headerSchema.toString() + ") is different from given fallback schema ("
                        + schema.toString() + ").");
            schema = headerSchema; // !!!
        }
    } else {
        String error = "No (readable) model/schema information in header!";
        if (schema != null)
            addWarning(error + " Using fallback schema (" + schema.toString() + ").");
        else
            throw new Exception("Could not find model/schema information in header!");
    }

    if (schema != null) {
        // Parse column headers:
        List<ColumnPointer<?>> headerColumnPointers = new ArrayList<ColumnPointer<?>>(headers.size());
        for (String columnHeader : headers) // (the remaining headers should all be qualified column names)
            headerColumnPointers.add(ColumnPointer.ByName(schema, columnHeader, false, false));
        // If we get here, nothing went wrong above:
        this.columnPointers = headerColumnPointers;
    }
}

From source file:com.opengamma.elsql.ElSqlParser.java

/**
 * Recursively parses the lines that belong to one container fragment.
 * Consumes lines from the iterator while they are indented deeper than
 * {@code indent}; the first line that is not is pushed back (via
 * {@code previous()}) so the caller can process it.
 *
 * @param container the fragment to which parsed child fragments are added
 * @param lineIterator iterator over the remaining input lines
 * @param indent the indent level of the container's own line; negative at
 *        the root level, where only {@code @NAME} fragments are permitted
 * @throws IllegalArgumentException when a tag has an invalid format, has no
 *         indented body, or a non-{@code @NAME} fragment appears at root level
 */
private void parseContainerSection(ContainerSqlFragment container, ListIterator<Line> lineIterator,
        int indent) {
    while (lineIterator.hasNext()) {
        Line line = lineIterator.next();
        if (line.isComment()) {
            // Comments are stripped from the input entirely.
            lineIterator.remove();
            continue;
        }
        if (line.indent() <= indent) {
            // Line belongs to an enclosing container: push it back and return.
            lineIterator.previous();
            return;
        }
        String trimmed = line.lineTrimmed();
        if (trimmed.startsWith("@NAME")) {
            Matcher nameMatcher = NAME_PATTERN.matcher(trimmed);
            if (nameMatcher.matches() == false) {
                throw new IllegalArgumentException("@NAME found with invalid format: " + line);
            }
            NameSqlFragment nameFragment = new NameSqlFragment(nameMatcher.group(1));
            // Recurse: everything indented deeper than this line is the body.
            parseContainerSection(nameFragment, lineIterator, line.indent());
            if (nameFragment.getFragments().size() == 0) {
                throw new IllegalArgumentException("@NAME found with no subsequent indented lines: " + line);
            }
            container.addFragment(nameFragment);
            _namedFragments.put(nameFragment.getName(), nameFragment);

        } else if (indent < 0) {
            throw new IllegalArgumentException(
                    "Invalid fragment found at root level, only @NAME is permitted: " + line);

        } else if (trimmed.startsWith("@PAGING")) {
            Matcher pagingMatcher = PAGING_PATTERN.matcher(trimmed);
            if (pagingMatcher.matches() == false) {
                throw new IllegalArgumentException("@PAGING found with invalid format: " + line);
            }
            PagingSqlFragment whereFragment = new PagingSqlFragment(pagingMatcher.group(1),
                    pagingMatcher.group(2));
            parseContainerSection(whereFragment, lineIterator, line.indent());
            if (whereFragment.getFragments().size() == 0) {
                throw new IllegalArgumentException("@PAGING found with no subsequent indented lines: " + line);
            }
            container.addFragment(whereFragment);

        } else if (trimmed.startsWith("@WHERE")) {
            // @WHERE takes no arguments, so it must match exactly.
            if (trimmed.equals("@WHERE") == false) {
                throw new IllegalArgumentException("@WHERE found with invalid format: " + line);
            }
            WhereSqlFragment whereFragment = new WhereSqlFragment();
            parseContainerSection(whereFragment, lineIterator, line.indent());
            if (whereFragment.getFragments().size() == 0) {
                throw new IllegalArgumentException("@WHERE found with no subsequent indented lines: " + line);
            }
            container.addFragment(whereFragment);

        } else if (trimmed.startsWith("@AND")) {
            Matcher andMatcher = AND_PATTERN.matcher(trimmed);
            if (andMatcher.matches() == false) {
                throw new IllegalArgumentException("@AND found with invalid format: " + line);
            }
            AndSqlFragment andFragment = new AndSqlFragment(andMatcher.group(1),
                    StringUtils.strip(andMatcher.group(2), " ="));
            parseContainerSection(andFragment, lineIterator, line.indent());
            if (andFragment.getFragments().size() == 0) {
                throw new IllegalArgumentException("@AND found with no subsequent indented lines: " + line);
            }
            container.addFragment(andFragment);

        } else if (trimmed.startsWith("@OR")) {
            Matcher orMatcher = OR_PATTERN.matcher(trimmed);
            if (orMatcher.matches() == false) {
                throw new IllegalArgumentException("@OR found with invalid format: " + line);
            }
            OrSqlFragment orFragment = new OrSqlFragment(orMatcher.group(1),
                    StringUtils.strip(orMatcher.group(2), " ="));
            parseContainerSection(orFragment, lineIterator, line.indent());
            if (orFragment.getFragments().size() == 0) {
                throw new IllegalArgumentException("@OR found with no subsequent indented lines: " + line);
            }
            container.addFragment(orFragment);

        } else if (trimmed.startsWith("@IF")) {
            Matcher ifMatcher = IF_PATTERN.matcher(trimmed);
            if (ifMatcher.matches() == false) {
                throw new IllegalArgumentException("@IF found with invalid format: " + line);
            }
            IfSqlFragment ifFragment = new IfSqlFragment(ifMatcher.group(1),
                    StringUtils.strip(ifMatcher.group(2), " ="));
            parseContainerSection(ifFragment, lineIterator, line.indent());
            if (ifFragment.getFragments().size() == 0) {
                throw new IllegalArgumentException("@IF found with no subsequent indented lines: " + line);
            }
            container.addFragment(ifFragment);

        } else {
            // Plain SQL text line.
            parseLine(container, line);
        }
    }
}

From source file:org.cloudata.core.common.conf.CloudataConf.java

/**
 * Loads every resource in the given list into {@code props}, either front
 * to back or back to front.
 *
 * @param props the properties object to populate
 * @param resources the resources to load
 * @param reverse if true, iterate the resources from last to first
 * @param quiet passed through to loadResource()
 */
private void loadResources(Properties props, ArrayList resources, boolean reverse, boolean quiet) {
    if (reverse) {
        // Walk backwards from the end of the list.
        ListIterator it = resources.listIterator(resources.size());
        while (it.hasPrevious()) {
            loadResource(props, it.previous(), quiet);
        }
    } else {
        ListIterator it = resources.listIterator(0);
        while (it.hasNext()) {
            loadResource(props, it.next(), quiet);
        }
    }
}

From source file:org.apache.cayenne.access.DbGenerator.java

/**
 * Executes a set of commands to drop/create database objects. This is the
 * main worker method of DbGenerator. Command set is built based on
 * pre-configured generator settings.
 *
 * @param ds the data source providing the connection on which DDL is run
 * @throws Exception if obtaining the connection fails; individual statement
 *         failures go through safeExecute (presumably recorded in
 *         {@code failures} — confirm in safeExecute)
 */
public void runGenerator(DataSource ds) throws Exception {
    // Reset any failures collected by a previous run.
    this.failures = null;

    try (Connection connection = ds.getConnection();) {

        // drop tables — in *reverse* insert order so dependent tables are
        // dropped before the tables they reference
        if (shouldDropTables) {
            ListIterator<DbEntity> it = dbEntitiesInInsertOrder.listIterator(dbEntitiesInInsertOrder.size());
            while (it.hasPrevious()) {
                DbEntity ent = it.previous();
                for (String statement : dropTables.get(ent.getName())) {
                    safeExecute(connection, statement);
                }
            }
        }

        // create tables, remembering which ones were created so FK
        // constraints are only added for them below
        List<String> createdTables = new ArrayList<>();
        if (shouldCreateTables) {
            for (final DbEntity ent : dbEntitiesInInsertOrder) {

                // only create missing tables

                safeExecute(connection, createTables.get(ent.getName()));
                createdTables.add(ent.getName());
            }
        }

        // create FK constraints, but only for tables created in this run
        if (shouldCreateTables && shouldCreateFKConstraints) {
            for (DbEntity ent : dbEntitiesInInsertOrder) {

                if (createdTables.contains(ent.getName())) {
                    List<String> fks = createConstraints.get(ent.getName());
                    for (String fk : fks) {
                        safeExecute(connection, fk);
                    }
                }
            }
        }

        // drop primary-key generation support
        if (shouldDropPKSupport) {
            List<String> dropAutoPKSQL = getAdapter().getPkGenerator()
                    .dropAutoPkStatements(dbEntitiesRequiringAutoPK);
            for (final String sql : dropAutoPKSQL) {
                safeExecute(connection, sql);
            }
        }

        // create primary-key generation support
        if (shouldCreatePKSupport) {
            List<String> createAutoPKSQL = getAdapter().getPkGenerator()
                    .createAutoPkStatements(dbEntitiesRequiringAutoPK);
            for (final String sql : createAutoPKSQL) {
                safeExecute(connection, sql);
            }
        }

        new DbGeneratorPostprocessor().execute(connection, getAdapter());
    }
}

From source file:mekhq.Utilities.java

/**
 * Sorts a map by its values.
 *
 * @param unsortMap the map to sort (left unmodified)
 * @param highFirst if true, entries are ordered from highest to lowest
 *                  value; otherwise lowest to highest
 * @return a new LinkedHashMap whose iteration order follows the requested
 *         value ordering
 */
public static Map<String, Integer> sortMapByValue(Map<String, Integer> unsortMap, boolean highFirst) {

    // Copy the entries so the input map is left untouched.
    // (ArrayList instead of LinkedList: random-access sort is cheaper.)
    List<Map.Entry<String, Integer>> entries = new ArrayList<>(unsortMap.entrySet());

    // Sort ascending by value; the stock comparator replaces the old
    // anonymous Comparator class. The sort is stable, so equal values keep
    // their encounter order.
    entries.sort(Map.Entry.comparingByValue());

    // Reversing after a stable ascending sort reproduces the original
    // behaviour of iterating the sorted list backwards (including the
    // relative order of tied values).
    if (highFirst) {
        Collections.reverse(entries);
    }

    // Rebuild as a LinkedHashMap to preserve the sorted iteration order.
    Map<String, Integer> sortedMap = new LinkedHashMap<>();
    for (Map.Entry<String, Integer> entry : entries) {
        sortedMap.put(entry.getKey(), entry.getValue());
    }

    return sortedMap;
}

From source file:org.apache.atlas.model.typedef.AtlasStructDef.java

/**
 * Replaces the attribute definitions. When the incoming list contains
 * several attributes sharing a name (compared case-insensitively), only the
 * last occurrence is kept; the relative order of the survivors is preserved.
 *
 * @param attributeDefs the new attribute definitions; may be null or empty
 */
public void setAttributeDefs(List<AtlasAttributeDef> attributeDefs) {
    // Nothing to do when the exact same list instance is already set.
    if (this.attributeDefs != null && this.attributeDefs == attributeDefs) {
        return;
    }

    if (CollectionUtils.isEmpty(attributeDefs)) {
        this.attributeDefs = new ArrayList<>();
        return;
    }

    // Walk the list back to front so the *last* duplicate wins, then restore
    // the original order with a final reverse.
    Set<String> seenNames = new HashSet<>();
    List<AtlasAttributeDef> deduped = new ArrayList<>(attributeDefs.size());

    ListIterator<AtlasAttributeDef> it = attributeDefs.listIterator(attributeDefs.size());
    while (it.hasPrevious()) {
        AtlasAttributeDef candidate = it.previous();

        // Entries without a name (or null entries) are skipped entirely.
        String name = candidate != null ? candidate.getName() : null;
        if (name == null) {
            continue;
        }

        if (seenNames.add(name.toLowerCase())) {
            // Defensive copy so later mutation of the input can't leak in.
            deduped.add(new AtlasAttributeDef(candidate));
        }
    }
    Collections.reverse(deduped);

    this.attributeDefs = deduped;
}

From source file:chat.viska.commons.pipelines.Pipeline.java

/**
 * Finds an iterator positioned just before the pipe with the given name.
 *
 * @param name the pipe name to look for
 * @return an iterator whose next() call yields the named entry, or null
 *         when the name is blank or no such pipe exists
 */
@Nullable
private ListIterator<Map.Entry<String, Pipe>> getIteratorOf(final String name) {
    if (StringUtils.isBlank(name)) {
        return null;
    }
    final ListIterator<Map.Entry<String, Pipe>> it = pipes.listIterator();
    while (it.hasNext()) {
        if (it.next().getKey().equals(name)) {
            // Step back so the caller's next() returns the match itself.
            it.previous();
            return it;
        }
    }
    return null;
}