Example usage for java.util LinkedList removeFirst

List of usage examples for java.util LinkedList removeFirst

Introduction

On this page you can find example usage for java.util LinkedList removeFirst.

Prototype

public E removeFirst() 

Source Link

Document

Removes and returns the first element from this list.

Usage

From source file:org.dcm4chee.archive.conf.defaults.DeepEquals.java

/**
 * Compare two objects with a 'deep' comparison.  This will traverse the
 * Object graph and perform either a field-by-field comparison on each
 * object (if no .equals() method has been overridden from Object), or it
 * will call the customized .equals() method if it exists.  This method will
 * allow object graphs loaded at different times (with different object ids)
 * to be reliably compared.  Object.equals() / Object.hashCode() rely on the
 * object's identity, which would not consider two equivalent objects
 * necessarily equal.  This allows graphs containing instances of classes
 * that did not override .equals() / .hashCode() to be compared, for example
 * when testing for existence in a cache.  Relying on an object's identity
 * will not locate an object in a cache, yet relying on equivalence will.<br/><br/>
 *
 * This method handles cycles correctly, for example A->B->C->A.  Suppose a and
 * a' are two separate instances of A with the same values for all fields on
 * A, B, and C.  Then a.deepEquals(a') will return true.  It uses cycle detection,
 * storing visited object pairs in a Set to prevent endless loops.
 *
 * @param a Object one to compare
 * @param b Object two to compare
 * @return true if a is equivalent to b, false otherwise.  Equivalent means that
 * all field values of both subgraphs are the same, either at the field level
 * or via the respectively encountered overridden .equals() methods during
 * traversal.
 */
public static boolean deepEquals(Object a, Object b) {
    // Pairs already examined; prevents infinite loops on cyclic graphs.
    Set<DualKey> visited = new HashSet<DualKey>();
    LinkedList<DualKey> stack = new LinkedList<DualKey>();
    stack.addFirst(new DualKey(a, b, null));

    while (!stack.isEmpty()) {
        DualKey dualKey = stack.removeFirst();
        lastDualKey = dualKey; // remember the most recent pair for failure diagnostics

        visited.add(dualKey);

        if (dualKey._key1 == dualKey._key2) { // Same instance is always equal to itself.
            continue;
        }

        if (dualKey._key1 == null || dualKey._key2 == null) {
            // Special case: null is considered equal to an empty Object[] here.
            // NOTE(review): the cast below throws ClassCastException for empty
            // primitive arrays (e.g. int[]) — confirm callers never pass those.
            if (dualKey._key1 == null) {
                if (dualKey._key2.getClass().isArray() && ((Object[]) dualKey._key2).length == 0)
                    continue;
            }
            if (dualKey._key2 == null) {
                if (dualKey._key1.getClass().isArray() && ((Object[]) dualKey._key1).length == 0)
                    continue;
            }

            // If either one is null, not equal (both can't be null, due to above comparison).
            return false;
        }

        if (!dualKey._key1.getClass().equals(dualKey._key2.getClass())) { // Must be same class
            return false;
        }

        // Handle all [] types.  In order to be equal, the arrays must be the same
        // length, be of the same type, be in the same order, and all elements within
        // the array must be deeply equivalent.
        if (dualKey._key1.getClass().isArray()) {
            if (!compareArrays(dualKey, stack, visited)) {
                return false;
            }
            continue;
        }

        // Special-case SortedSets: they are fast to compare because their
        // elements must be in the same order to be equivalent Sets.
        if (dualKey._key1 instanceof SortedSet) {
            if (!compareOrderedCollection(dualKey, stack, visited)) {
                return false;
            }
            continue;
        }

        // Handle unordered Sets.  This is a slightly more expensive comparison because order cannot
        // be assumed, a temporary Map must be created, however the comparison still runs in O(N) time.
        if (dualKey._key1 instanceof Set) {
            if (!compareUnorderedCollection(dualKey, stack, visited)) {
                return false;
            }
            continue;
        }

        // Check any Collection that is not a Set.  In these cases, element order
        // matters, therefore this comparison is faster than using unordered comparison.
        if (dualKey._key1 instanceof Collection) {
            if (!compareOrderedCollection(dualKey, stack, visited)) {
                return false;
            }
            continue;
        }

        // Compare two SortedMaps.  This takes advantage of the fact that these
        // Maps can be compared in O(N) time due to their ordering.
        if (dualKey._key1 instanceof SortedMap) {
            if (!compareSortedMap(dualKey, stack, visited)) {
                return false;
            }
            continue;
        }

        // Compare two unordered Maps. This is a slightly more expensive comparison because
        // order cannot be assumed, therefore a temporary Map must be created, however the
        // comparison still runs in O(N) time.
        if (dualKey._key1 instanceof Map) {
            if (!compareUnorderedMap(dualKey, stack, visited)) {
                return false;
            }
            continue;
        }

        // A user-supplied .equals() is trusted over field-by-field traversal.
        if (hasCustomEquals(dualKey._key1.getClass())) {
            if (!dualKey._key1.equals(dualKey._key2)) {
                return false;
            }
            continue;
        }

        lastClass = dualKey._key1.getClass().toString(); // for failure diagnostics

        // Check if we have a custom deepEquals implementation registered for this class.
        CustomDeepEquals de = customDeepEquals.get(dualKey._key1.getClass());
        if (de != null) {
            if (!de.deepEquals(dualKey._key1, dualKey._key2))
                return false;
        } else {
            // Fall back to field-by-field comparison: push each field pair onto
            // the work stack unless that pair has already been visited.
            Collection<Field> fields = getDeepDeclaredFields(dualKey._key1.getClass());

            for (Field field : fields) {
                try {

                    DualKey dk = new DualKey(field.get(dualKey._key1), field.get(dualKey._key2),
                            field.getName(), dualKey);
                    if (!visited.contains(dk)) {
                        stack.addFirst(dk);
                    }
                } catch (Exception ignored) {
                    // Inaccessible field; skip it rather than fail the whole comparison.
                }
            }
        }
    }

    return true;
}

From source file:org.broadinstitute.gatk.utils.io.IOUtils.java

/**
 * Returns the last lines of the file./*from  ww  w  .ja  v  a2 s. c o m*/
 * NOTE: This is only safe to run on smaller files!
 *
 * @param file  File to read.
 * @param count Maximum number of lines to return.
 * @return The last count lines from file.
 * @throws IOException When unable to read the file.
 */
public static List<String> tail(File file, int count) throws IOException {
    LinkedList<String> tailLines = new LinkedList<String>();
    FileReader reader = new FileReader(file);
    try {
        LineIterator iterator = org.apache.commons.io.IOUtils.lineIterator(reader);
        int lineCount = 0;
        while (iterator.hasNext()) {
            String line = iterator.nextLine();
            lineCount++;
            if (lineCount > count)
                tailLines.removeFirst();
            tailLines.offer(line);
        }
    } finally {
        org.apache.commons.io.IOUtils.closeQuietly(reader);
    }
    return tailLines;
}

From source file:org.apache.metron.performance.load.monitor.writers.Writer.java

/**
 * Appends {@code d} to the look-back window, evicting the oldest sample
 * first whenever the window has already reached {@code summaryLookback} entries.
 */
private void addToLookback(Double d, LinkedList<Double> lookback) {
    boolean windowFull = lookback.size() >= summaryLookback;
    if (windowFull) {
        lookback.removeFirst();
    }
    lookback.addLast(d);
}

From source file:cc.kave.commons.model.groum.comparator.Path.java

/** Returns a new Path containing this path's nodes with the first one removed. */
public Path getTail() {
    LinkedList<Node> remaining = new LinkedList<>(path);
    if (remaining.size() > 0) {
        remaining.removeFirst();
    }
    return new Path(remaining);
}

From source file:org.apache.ofbiz.solr.SolrProductSearch.java

/**
 * Builds a map of the side deep categories for the category trail read from
 * the service context, querying Solr once per trail element.
 *
 * @param dctx    the dispatch context used for category lookups
 * @param context service context; reads "indexName", optional "catalogId"
 *                and "productCategoryId"
 * @return service result containing "categories" (a map of "menu-&lt;level&gt;"
 *         to lists of category maps) and "numFound"; an error result if any
 *         exception occurs
 */
public static Map<String, Object> getSideDeepCategories(DispatchContext dctx, Map<String, Object> context) {
    Map<String, Object> result;
    String solrIndexName = (String) context.get("indexName");
    try {
        String catalogId = null;
        if (UtilValidate.isNotEmpty(context.get("catalogId")))
            catalogId = (String) context.get("catalogId");

        // NOTE(review): when "productCategoryId" is absent this stays null and
        // the split() below throws a NullPointerException, which the broad
        // catch turns into an error result — confirm that is intentional.
        String productCategoryId = (String) context.get("productCategoryId") != null
                ? CategoryUtil.getCategoryNameWithTrail((String) context.get("productCategoryId"), dctx)
                : null;
        result = ServiceUtil.returnSuccess();
        Map<String, List<Map<String, Object>>> catLevel = new HashMap<String, List<Map<String, Object>>>();
        Debug.logInfo("productCategoryId: " + productCategoryId, module);

        // The trail is a "/"-separated path; each element becomes one Solr facet query.
        String[] trailElements = productCategoryId.split("/");

        //iterate over actual results
        for (String elements : trailElements) {
            // Skip trail elements too short to be category ids (must be > 3 chars).
            if (elements.length() > 3) {
                Debug.logInfo("elements: " + elements, module);
                String categoryPath = CategoryUtil.getCategoryNameWithTrail(elements, dctx);
                String[] categoryPathArray = categoryPath.split("/");
                // First path segment encodes the trail depth/level.
                int level = Integer.parseInt(categoryPathArray[0]);
                String facetQuery = CategoryUtil.getFacetFilterForCategory(categoryPath, dctx);
                //Debug.logInfo("categoryPath: "+categoryPath + " facetQuery: "+facetQuery,module);
                Map<String, Object> query = SolrUtil.categoriesAvailable(catalogId, categoryPath, null,
                        facetQuery, false, 0, 0, solrIndexName);
                QueryResponse cat = (QueryResponse) query.get("rows");
                List<Map<String, Object>> categories = new ArrayList<Map<String, Object>>();

                List<FacetField> catList = (List<FacetField>) cat.getFacetFields();
                for (Iterator<FacetField> catIterator = catList.iterator(); catIterator.hasNext();) {
                    FacetField field = (FacetField) catIterator.next();
                    List<Count> catL = (List<Count>) field.getValues();
                    if (catL != null) {
                        // Only facet values with at least one hit become category entries.
                        for (Iterator<Count> catIter = catL.iterator(); catIter.hasNext();) {
                            FacetField.Count f = (FacetField.Count) catIter.next();
                            if (f.getCount() > 0) {
                                Map<String, Object> catMap = new HashMap<String, Object>();
                                // Facet name is "<level>/<id>/.../<id>"; last segment is the category id.
                                LinkedList<String> iName = new LinkedList<String>();
                                iName.addAll(Arrays.asList(f.getName().split("/")));
                                //Debug.logInfo("topLevel "+topLevel,"");
                                // int l = Integer.parseInt((String) iName.getFirst());
                                catMap.put("catId", iName.getLast());
                                iName.removeFirst(); // drop the leading level segment
                                String path = f.getName();
                                catMap.put("path", path);
                                if (level > 0) {
                                    // Parent is the trail minus the category itself.
                                    iName.removeLast();
                                    catMap.put("parentCategory", StringUtils.join(iName, "/"));
                                } else {
                                    catMap.put("parentCategory", null);
                                }
                                catMap.put("count", Long.toString(f.getCount()));
                                categories.add(catMap);
                            }
                        }
                    }
                }
                catLevel.put("menu-" + level, categories);
            }
        }
        result.put("categories", catLevel);
        result.put("numFound", (long) 0);

    } catch (Exception e) {
        result = ServiceUtil.returnError(e.toString());
        result.put("numFound", (long) 0);
    }
    return result;
}

From source file:com.asakusafw.runtime.directio.hadoop.HadoopDataSourceUtil.java

/**
 * Expands the given file statuses into a flat list that also includes all
 * transitive children of directories, visiting each path at most once.
 *
 * @param fs the file system to list children from
 * @param current the statuses to start expanding from
 * @return the expanded, de-duplicated list of statuses
 * @throws IOException if listing a directory fails (other than not-found)
 */
private static List<FileStatus> recursiveStep(FileSystem fs, List<FileStatus> current) throws IOException {
    assert fs != null;
    assert current != null;
    Set<Path> seen = new HashSet<>();
    List<FileStatus> collected = new ArrayList<>();
    LinkedList<FileStatus> pending = new LinkedList<>(current);
    while (!pending.isEmpty()) {
        FileStatus status = pending.removeFirst();
        Path path = status.getPath();
        // add() returns false for an already-seen path, skipping duplicates.
        if (seen.add(path)) {
            collected.add(status);
            if (status.isDirectory()) {
                FileStatus[] children = null;
                try {
                    children = fs.listStatus(path);
                } catch (FileNotFoundException e) {
                    // The directory vanished between discovery and listing; skip it.
                    if (LOG.isDebugEnabled()) {
                        LOG.debug(MessageFormat.format("Target file is not found: {0}", path), e); //$NON-NLS-1$
                    }
                }
                if (children != null) {
                    Collections.addAll(pending, children);
                }
            }
        }
    }
    return collected;
}

From source file:com.asakusafw.directio.tools.DirectIoList.java

@Override
public int run(String[] args) throws Exception {
    LinkedList<String> rest = new LinkedList<>();
    Collections.addAll(rest, args);
    // Drop a single leading "--" options separator, if present.
    if (rest.isEmpty() == false) {
        String head = rest.removeFirst();
        if (head.equals("--") == false) { //$NON-NLS-1$
            rest.addFirst(head);
        }
    }
    // Need at least a base path and one resource pattern.
    if (rest.size() < 2) {
        LOG.error(MessageFormat.format("Invalid arguments: {0}", Arrays.toString(args)));
        System.err.println(MessageFormat.format(
                "Usage: hadoop {0} -conf <datasource-conf.xml> base-path resource-pattern [resource-pattern [...]]",
                getClass().getName()));
        return 1;
    }
    String pathArg = rest.removeFirst();
    List<FilePattern> patterns = new ArrayList<>();
    for (String patternArg : rest) {
        patterns.add(FilePattern.compile(patternArg));
    }
    if (repository == null) {
        repository = HadoopDataSourceUtil.loadRepository(getConf());
    }
    String basePath = repository.getComponentPath(pathArg);
    DirectDataSource source = repository.getRelatedDataSource(pathArg);
    // Print every resource matching any of the given patterns.
    for (FilePattern pattern : patterns) {
        List<ResourceInfo> list = source.list(basePath, pattern, new Counter());
        for (ResourceInfo info : list) {
            System.out.println(info.getPath());
        }
    }
    return 0;
}

From source file:org.apache.shindig.gadgets.parse.AbstractParsingTestBase.java

/**
 * Asserts that two HTML strings are equivalent, tolerating whitespace-only
 * differences, case differences, and differently-encoded but equivalent
 * entity escape sequences.  On any other difference, fails by asserting
 * full string equality (producing a readable diff in the failure message).
 */
private void assertHtmlEquals(String expected, String serialized) {
    // Compute the diff of expected vs. serialized, and disregard constructs that we don't
    // care about, such as whitespace deltas and differently-computed escape sequences.
    diff_match_patch dmp = new diff_match_patch();
    LinkedList<Diff> diffs = dmp.diff_main(expected, serialized);
    while (!diffs.isEmpty()) {
        // Note: each `continue` below resumes this while loop, not the switch.
        Diff cur = diffs.removeFirst();
        switch (cur.operation) {
        case DELETE:
            // Whitespace-only or "amp;" deletions are considered insignificant.
            if (StringUtils.isBlank(cur.text) || "amp;".equalsIgnoreCase(cur.text)) {
                continue;
            }
            if (diffs.isEmpty()) {
                // End of the set: assert known failure.
                // NOTE(review): if the strings were actually equal this assert
                // passes and the removeFirst() below would throw
                // NoSuchElementException — confirm this path is unreachable.
                assertEquals(expected, serialized);
            }
            // A significant DELETE must be paired with an INSERT of equivalent text.
            Diff next = diffs.removeFirst();
            if (next.operation != Operation.INSERT) {
                // Next operation isn't a paired insert: assert known failure.
                assertEquals(expected, serialized);
            }
            if (!equivalentEntities(cur.text, next.text) && !cur.text.equalsIgnoreCase(next.text)) {
                // Delete/insert pair: fail unless each's text is equivalent
                // either in terms of case or entity equivalence.
                assertEquals(expected, serialized);
            }
            break;
        case INSERT:
            // Assert known failure unless insert is whitespace/blank.
            if (StringUtils.isBlank(cur.text) || "amp;".equalsIgnoreCase(cur.text)) {
                continue;
            }
            assertEquals(expected, serialized);
            break;
        default:
            // EQUALS: move on.
            break;
        }
    }
}

From source file:com.linuxbox.util.queueservice.mongodb.JavaQueueService.java

/**
 * Removes and returns the oldest entry queued under the given identifier.
 * The emptied per-identifier queue is discarded, and the removed entry is
 * recorded on the dequeued stack.
 *
 * @param identifier the queue to take from
 * @return the removed entry, or null when no queue exists for the identifier
 * @throws QueueServiceException declared by the interface contract
 */
@Override
public synchronized QueueEntry dequeue(String identifier) throws QueueServiceException {
    LinkedList<QueueEntry> entries = queue.get(identifier);
    if (entries == null) {
        return null;
    }
    QueueEntry head = entries.removeFirst();
    if (entries.peek() == null) {
        // Queue drained: drop the mapping so the map doesn't accumulate empties.
        queue.remove(identifier);
    }
    dequeued.push(head);
    return head;
}

From source file:edu.uci.ics.jung.graph.predicates.ConnectedGraphPredicate.java

/**
 * Returns <code>true</code> if there exists a path from each 
 * vertex to all other vertices (ignoring edge direction).
 * /* w ww.j  ava2 s . c om*/
 * <p>Returns <code>true</code> for an empty graph.</p>
 * 
 * @see org.apache.commons.collections.Predicate#evaluate(java.lang.Object)
 */
public boolean evaluateGraph(ArchetypeGraph graph) {
    Graph g = (Graph) graph;
    if (g.numVertices() == 0)
        return true;

    Vertex start = (Vertex) g.getVertices().iterator().next(); // pick any vertex
    Set visited = new HashSet();
    LinkedList stack = new LinkedList();
    stack.add(start);
    // traverse through graph in depth-first order
    while (!stack.isEmpty()) {
        Vertex v = (Vertex) stack.removeFirst();
        visited.add(v);
        Set neighbors = v.getNeighbors();
        for (Iterator n_it = neighbors.iterator(); n_it.hasNext();) {
            Vertex w = (Vertex) n_it.next();
            if (!visited.contains(w))
                stack.addFirst(w);
        }
    }
    return (visited.size() == g.numVertices());
}