Example usage for java.util LinkedList push

Introduction

On this page you can find usage examples for java.util.LinkedList#push.

Prototype

public void push(E e) 

Document

Pushes an element onto the stack represented by this list.
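
The call push(e) is equivalent to addFirst(e): the head of the list acts as the top of the stack. As a minimal standalone illustration (a sketch written for this page, not taken from any of the projects below):

import java.util.LinkedList;

public class LinkedListPushDemo {
    public static void main(String[] args) {
        LinkedList<String> stack = new LinkedList<String>();
        stack.push("first");
        stack.push("second"); // "second" is now at the head of the list
        System.out.println(stack.peek()); // second -- the top of the stack
        System.out.println(stack.pop()); // second
        System.out.println(stack.pop()); // first
        System.out.println(stack.isEmpty()); // true
    }
}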

Usage

From source file:org.jdto.util.expression.Expression.java
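
This example evaluates a postfix expression: operand tokens are pushed onto a term stack, and for each operator the two topmost terms are popped (right operand first) and combined into a compound term that is pushed back. When all tokens are consumed, the single remaining element is the root of the expression tree.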

private ExpressionTerm parsePostfixExpr(String postfix) {
    //split the string
    String[] tokens = StringUtils.split(postfix, ' ');

    LinkedList<ExpressionTerm> termStack = new LinkedList<ExpressionTerm>();

    for (String token : tokens) {
        //if the token is not an operator, read a literal or a variable
        if (!isOperator(token)) {
            termStack.push(buildTerm(token));
        } else {
            //try to build a compound term and push it.
            Operator op = Operator.getOperaorByString(token);
            ExpressionTerm right = termStack.pop(); //first is the right side
            ExpressionTerm left = termStack.pop(); //and then the left side

            ExpressionTerm term = new CompoundTerm(op, left, right);
            termStack.push(term);
        }
    }

    //at this point the stack should have just one element.

    return termStack.pop();
}

From source file:org.graphipedia.wikipedia.parser.SimpleStaxParser.java
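
Here the LinkedList works as a stack of currently open XML elements: each start element is pushed and the matching end element is popped, so peek() always returns the element whose text is being read.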

/**
 * Parses the elements in the XML file.
 * @param reader The XML stream.
 * @throws XMLStreamException when something goes wrong while parsing the XML file.
 */
private void parseElements(XMLStreamReader reader) throws XMLStreamException {
    LinkedList<String> elementStack = new LinkedList<String>();
    StringBuilder textBuffer = new StringBuilder();
    List<String> attributeValues = new ArrayList<String>();

    while (reader.hasNext()) {
        switch (reader.next()) {
        case XMLEvent.START_ELEMENT:
            String startElement = reader.getName().getLocalPart();
            elementStack.push(startElement);
            attributeValues = new ArrayList<String>();
            if (isInterestingWithAttributes(startElement)) {
                int noAttributes = reader.getAttributeCount();
                for (int i = 0; i < noAttributes; i += 1)
                    attributeValues.add(reader.getAttributeValue(i));
            }
            textBuffer.setLength(0);
            break;
        case XMLEvent.END_ELEMENT:
            String element = elementStack.pop();
            if (isInterestingWithAttributes(element)) {
                if (!handleElement(element, textBuffer.toString().trim(), attributeValues))
                    return;
            } else if (isInteresting(element)) {
                if (!handleElement(element, textBuffer.toString().trim()))
                    return;
            }
            break;
        case XMLEvent.CHARACTERS:
            if (isInteresting(elementStack.peek())) {
                textBuffer.append(reader.getText());
            }
            break;
        }
    }
}

From source file:org.powertac.common.repo.TariffRepo.java
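
In this example, push adds each new tariff to the head of the broker's tariff list, so the most recently added tariff comes first.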

public synchronized void addTariff(Tariff tariff) {
    // add to the tariffs list
    if (isRemoved(tariff.getId()) || null != tariffs.get(tariff.getId())) {
        log.error("Attempt to insert tariff with duplicate ID " + tariff.getId());
        return;
    }
    tariffs.put(tariff.getId(), tariff);

    // add to the brokerTariffs list
    LinkedList<Tariff> tariffList = brokerTariffs.get(tariff.getBroker().getId());
    if (null == tariffList) {
        tariffList = new LinkedList<Tariff>();
        brokerTariffs.put(tariff.getBroker().getId(), tariffList);
    }
    tariffList.push(tariff);
}

From source file:org.apache.tajo.scheduler.FairScheduler.java
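
Here push inserts the incoming query at the head of its assigned scheduling queue.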

@Override
protected void addQueryToQueue(QuerySchedulingInfo querySchedulingInfo) throws Exception {
    String submitQueueNameProperty = querySchedulingInfo.getQueryContext().get(ConfVars.JOB_QUEUE_NAMES.varname,
            ConfVars.JOB_QUEUE_NAMES.defaultVal);

    String queueName = submitQueueNameProperty.split(",")[0];
    synchronized (queues) {
        LinkedList<QuerySchedulingInfo> queue = queues.get(queueName);

        if (queue != null) {

            querySchedulingInfo.setAssignedQueueName(queueName);
            querySchedulingInfo.setCandidateQueueNames(Sets.newHashSet(queueName));
            queue.push(querySchedulingInfo);

            queryAssignedMap.put(querySchedulingInfo.getQueryId(), queueName);

            LOG.info(querySchedulingInfo.getQueryId() + " is assigned to the [" + queueName + "] queue");
        } else {
            throw new Exception(
                    "Can't find proper query queue(requested queue=" + submitQueueNameProperty + ")");
        }
    }
}

From source file:jp.co.atware.solr.geta.GETAssocComponent.java
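
This example mirrors the nesting of an XML response with a stack of NamedList instances: a new NamedList is pushed on each start element and popped on the matching end element, so stack.peek() is always the list for the enclosing element.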

/**
 * Converts the response returned by GETAssoc into a <code>NamedList</code>.
 *
 * @param inputStream the GETAssoc response stream
 * @return the result as a <code>NamedList</code>
 * @throws FactoryConfigurationError
 * @throws IOException
 */
protected NamedList<Object> convertResult(InputStream inputStream)
        throws FactoryConfigurationError, IOException {
    NamedList<Object> result = new NamedList<Object>();
    LinkedList<NamedList<Object>> stack = new LinkedList<NamedList<Object>>();
    stack.push(result);
    try {
        XMLStreamReader xml = XMLInputFactory.newInstance().createXMLStreamReader(inputStream);
        while (xml.hasNext()) {
            switch (xml.getEventType()) {
            case XMLStreamConstants.START_ELEMENT:
                NamedList<Object> element = new NamedList<Object>();
                stack.peek().add(xml.getName().toString(), element);
                stack.push(element);
                for (int i = 0; i < xml.getAttributeCount(); i++) {
                    String name = xml.getAttributeName(i).toString();
                    String value = xml.getAttributeValue(i);
                    ValueOf valueOf = valueTransMap.get(name);
                    if (valueOf != null) {
                        try {
                            element.add(name, valueOf.toValue(value));
                        } catch (NumberFormatException e) {
                            element.add(name, value);
                        }
                    } else {
                        element.add(name, value);
                    }
                }
                break;
            case XMLStreamConstants.END_ELEMENT:
                stack.pop();
                break;
            default:
                break;
            }
            xml.next();

        }
        xml.close();
    } catch (XMLStreamException e) {
        throw new IOException(e);
    }

    LOG.debug(result.toString());
    return result;
}

From source file:edu.tum.cs.vis.model.util.algorithm.ACCUM.java
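
Here the boundary list is used as a work stack for traversing a vertex neighborhood: unvisited neighbors are pushed and popped until the region around the vertex has been processed, with a flag map guarding against revisiting vertices.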

/**
 * Diffuses a vector field at one vertex, weighted by a Gaussian of width 1/sqrt(invsigma2).
 * Ported from trimesh2 (2.12).
 */
@SuppressWarnings("javadoc")
private static void diffuse_vert_field(final Model m, HashMap<Vertex, Curvature> curvatures,
        Map<Vertex, Long> flags, AtomicLong flag_curr, final ACCUM accum, int v, float invsigma2, Vertex flt) {
    Vertex vert = m.getVertices().get(v);
    if (vert.getNeighbors().size() == 0) {
        // flt.set(0, 0, 0);
        accum.a(m, curvatures, vert, flt, 1.0f, vert);
        return;
    }

    // flt.set(0, 0, 0);
    accum.a(m, curvatures, vert, flt, vert.getPointarea(), vert);
    float sum_w = vert.getPointarea();
    final Vector3f nv = vert.getNormalVector();

    long flag_curr_val = flag_curr.incrementAndGet();
    flags.put(vert, flag_curr_val);
    LinkedList<Vertex> boundary = new LinkedList<Vertex>();
    boundary.addAll(vert.getNeighbors());
    while (boundary.size() > 0) {
        Vertex n = boundary.pop();
        if (flags.get(n) != null && flags.get(n) == flag_curr_val)
            continue;
        flags.put(n, flag_curr_val);
        if (nv.dot(n.getNormalVector()) <= 0.0f)
            continue;
        // Gaussian weight
        float w = wt(n, vert, invsigma2);
        if (w == 0.0f)
            continue;
        // Downweight things pointing in different directions
        w *= nv.dot(n.getNormalVector());
        // Surface area "belonging" to each point
        w *= n.getPointarea();
        // Accumulate weight times field at neighbor
        accum.a(m, curvatures, vert, flt, w, n);
        sum_w += w;
        for (Vertex nn : n.getNeighbors()) {
            if (flags.get(nn) != null && flags.get(nn) == flag_curr_val)
                continue;
            boundary.push(nn);
        }
    }
    flt.scale(1 / sum_w);
}

From source file:org.pentaho.repo.controller.RepositoryBrowserController.java
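
In this example, push puts the newest search at the head of the recent-searches list, and pollLast drops the oldest entry from the tail once the list grows beyond five items.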

public LinkedList<String> storeRecentSearch(String recentSearch) {
    LinkedList<String> recentSearches = getRecentSearches();
    try {
        if (recentSearch == null || recentSearches.contains(recentSearch)) {
            return recentSearches;
        }
        recentSearches.push(recentSearch);
        if (recentSearches.size() > 5) {
            recentSearches.pollLast();
        }

        JSONArray jsonArray = new JSONArray();
        CollectionUtils.addAll(jsonArray, recentSearches.toArray());

        PropsUI props = PropsUI.getInstance();
        String jsonValue = props.getRecentSearches();
        JSONParser jsonParser = new JSONParser();
        JSONObject jsonObject = jsonValue != null ? (JSONObject) jsonParser.parse(jsonValue) : new JSONObject();

        String login = "file_repository_no_login";
        if (Spoon.getInstance().rep.getUserInfo() != null) {
            login = Spoon.getInstance().rep.getUserInfo().getLogin();
        }

        jsonObject.put(login, jsonArray);
        props.setRecentSearches(jsonObject.toJSONString());
    } catch (Exception e) {
        // Log error in console
    }

    return recentSearches;
}

From source file:org.apache.hadoop.hbase.regionserver.HLog.java
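
Here log entries are pushed onto a per-region LinkedList as they are read, which leaves the newest entry at the head; the writer threads later iterate the list backwards (a ListIterator starting at the end) so that edits are replayed oldest first.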

private static List<Path> splitLog(final Path rootDir, final FileStatus[] logfiles, final FileSystem fs,
        final HBaseConfiguration conf) throws IOException {
    final Map<byte[], WriterAndPath> logWriters = Collections
            .synchronizedMap(new TreeMap<byte[], WriterAndPath>(Bytes.BYTES_COMPARATOR));
    List<Path> splits = null;

    // Number of threads to use when log splitting to rewrite the logs.
    // More means faster but bigger mem consumption.
    int logWriterThreads = conf.getInt("hbase.regionserver.hlog.splitlog.writer.threads", 3);

    // Number of logs to read concurrently when log splitting.
    // More means faster but bigger mem consumption.
    int concurrentLogReads = conf.getInt("hbase.regionserver.hlog.splitlog.reader.threads", 3);
    // Is append supported?
    boolean append = isAppend(conf);
    try {
        int maxSteps = Double.valueOf(Math.ceil((logfiles.length * 1.0) / concurrentLogReads)).intValue();
        for (int step = 0; step < maxSteps; step++) {
            final Map<byte[], LinkedList<HLogEntry>> logEntries = new TreeMap<byte[], LinkedList<HLogEntry>>(
                    Bytes.BYTES_COMPARATOR);
            // Stop at logfiles.length when it's the last step
            int endIndex = step == maxSteps - 1 ? logfiles.length
                    : step * concurrentLogReads + concurrentLogReads;
            for (int i = (step * concurrentLogReads); i < endIndex; i++) {
                // Check for possibly empty file. With appends, currently Hadoop 
                // reports a zero length even if the file has been sync'd. Revisit if
                // HADOOP-4751 is committed.
                long length = logfiles[i].getLen();
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Splitting hlog " + (i + 1) + " of " + logfiles.length + ": "
                            + logfiles[i].getPath() + ", length=" + logfiles[i].getLen());
                }
                recoverLog(fs, logfiles[i].getPath(), append);
                SequenceFile.Reader in = null;
                int count = 0;
                try {
                    in = new SequenceFile.Reader(fs, logfiles[i].getPath(), conf);
                    try {
                        HLogKey key = newKey(conf);
                        KeyValue val = new KeyValue();
                        while (in.next(key, val)) {
                            byte[] regionName = key.getRegionName();
                            LinkedList<HLogEntry> queue = logEntries.get(regionName);
                            if (queue == null) {
                                queue = new LinkedList<HLogEntry>();
                                LOG.debug("Adding queue for " + Bytes.toStringBinary(regionName));
                                logEntries.put(regionName, queue);
                            }
                            HLogEntry hle = new HLogEntry(val, key);
                            queue.push(hle);
                            count++;
                            // Make the key and value new each time; otherwise same instance
                            // is used over and over.
                            key = newKey(conf);
                            val = new KeyValue();
                        }
                        LOG.debug("Pushed=" + count + " entries from " + logfiles[i].getPath());
                    } catch (IOException e) {
                        LOG.debug("IOE Pushed=" + count + " entries from " + logfiles[i].getPath());
                        e = RemoteExceptionHandler.checkIOException(e);
                        if (!(e instanceof EOFException)) {
                            LOG.warn("Exception processing " + logfiles[i].getPath()
                                    + " -- continuing. Possible DATA LOSS!", e);
                        }
                    }
                } catch (IOException e) {
                    if (length <= 0) {
                        LOG.warn("Empty hlog, continuing: " + logfiles[i] + " count=" + count, e);
                        continue;
                    }
                    throw e;
                } finally {
                    try {
                        if (in != null) {
                            in.close();
                        }
                    } catch (IOException e) {
                        LOG.warn("Close in finally threw exception -- continuing", e);
                    }
                    // Delete the input file now so we do not replay edits. We could
                    // have gotten here because of an exception. If so, probably
                    // nothing we can do about it. Replaying it, it could work but we
                    // could be stuck replaying for ever. Just continue though we
                    // could have lost some edits.
                    fs.delete(logfiles[i].getPath(), true);
                }
            }
            ExecutorService threadPool = Executors.newFixedThreadPool(logWriterThreads);
            for (final byte[] key : logEntries.keySet()) {
                Thread thread = new Thread(Bytes.toStringBinary(key)) {
                    @Override
                    public void run() {
                        LinkedList<HLogEntry> entries = logEntries.get(key);
                        LOG.debug("Thread got " + entries.size() + " to process");
                        long threadTime = System.currentTimeMillis();
                        try {
                            int count = 0;
                            // Items were added to the linkedlist oldest first. Pull them
                            // out in that order.
                            for (ListIterator<HLogEntry> i = entries.listIterator(entries.size()); i
                                    .hasPrevious();) {
                                HLogEntry logEntry = i.previous();
                                WriterAndPath wap = logWriters.get(key);
                                if (wap == null) {
                                    Path logfile = new Path(
                                            HRegion.getRegionDir(
                                                    HTableDescriptor.getTableDir(rootDir,
                                                            logEntry.getKey().getTablename()),
                                                    HRegionInfo.encodeRegionName(key)),
                                            HREGION_OLDLOGFILE_NAME);
                                    Path oldlogfile = null;
                                    SequenceFile.Reader old = null;
                                    if (fs.exists(logfile)) {
                                        FileStatus stat = fs.getFileStatus(logfile);
                                        if (stat.getLen() <= 0) {
                                            LOG.warn("Old hlog file " + logfile + " is zero "
                                                    + "length. Deleting existing file");
                                            fs.delete(logfile, false);
                                        } else {
                                            LOG.warn("Old hlog file " + logfile + " already "
                                                    + "exists. Copying existing file to new file");
                                            oldlogfile = new Path(logfile.toString() + ".old");
                                            fs.rename(logfile, oldlogfile);
                                            old = new SequenceFile.Reader(fs, oldlogfile, conf);
                                        }
                                    }
                                    SequenceFile.Writer w = SequenceFile.createWriter(fs, conf, logfile,
                                            getKeyClass(conf), KeyValue.class, getCompressionType(conf));
                                    wap = new WriterAndPath(logfile, w);
                                    logWriters.put(key, wap);
                                    if (LOG.isDebugEnabled()) {
                                        LOG.debug("Creating new hlog file writer for path " + logfile
                                                + " and region " + Bytes.toStringBinary(key));
                                    }

                                    if (old != null) {
                                        // Copy from existing log file
                                        HLogKey oldkey = newKey(conf);
                                        KeyValue oldval = new KeyValue();
                                        for (; old.next(oldkey, oldval); count++) {
                                            if (LOG.isDebugEnabled() && count > 0 && count % 10000 == 0) {
                                                LOG.debug("Copied " + count + " edits");
                                            }
                                            w.append(oldkey, oldval);
                                        }
                                        old.close();
                                        fs.delete(oldlogfile, true);
                                    }
                                }
                                wap.w.append(logEntry.getKey(), logEntry.getEdit());
                                count++;
                            }
                            if (LOG.isDebugEnabled()) {
                                LOG.debug("Applied " + count + " total edits to " + Bytes.toStringBinary(key)
                                        + " in " + (System.currentTimeMillis() - threadTime) + "ms");
                            }
                        } catch (IOException e) {
                            e = RemoteExceptionHandler.checkIOException(e);
                            LOG.warn("Got while writing region " + Bytes.toStringBinary(key) + " log " + e);
                            e.printStackTrace();
                        }
                    }
                };
                threadPool.execute(thread);
            }
            threadPool.shutdown();
            // Wait for all threads to terminate
            try {
                for (int i = 0; !threadPool.awaitTermination(5, TimeUnit.SECONDS); i++) {
                    LOG.debug("Waiting for hlog writers to terminate, iteration #" + i);
                }
            } catch (InterruptedException ex) {
                LOG.warn("Hlog writers were interrupted, possible data loss!");
            }
        }
    } finally {
        splits = new ArrayList<Path>(logWriters.size());
        for (WriterAndPath wap : logWriters.values()) {
            wap.w.close();
            LOG.debug("Closed " + wap.p);
            splits.add(wap.p);
        }
    }
    return splits;
}

From source file:org.bimserver.charting.Containers.TreeNode.java
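
In this example, nodes popped from one work stack are pushed onto a second list, which reverses the traversal order so that the returned iterator visits leaf nodes before the root.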

public Iterator<TreeNode> iterateFromLeafNodesToRoot() {
    LinkedList<TreeNode> nodes = new LinkedList<TreeNode>(Arrays.asList(this));
    LinkedList<TreeNode> returningNodes = new LinkedList<>();
    TreeNode thisNode = null;
    while (nodes.size() > 0) {
        thisNode = nodes.pop();
        returningNodes.push(thisNode);
        if (thisNode.Children.length > 0) {
            int i = -1;
            int n = thisNode.Children.length;
            while (++i < n)
                nodes.push(thisNode.Children[i]);
        }
    }
    return returningNodes.iterator();
}

From source file:org.unitime.timetable.onlinesectioning.server.AbstractServer.java
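
Here a per-thread LinkedList (the get/set pattern on sHelper suggests a ThreadLocal) acts as a stack of helpers; push makes the given helper the current one.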

protected void setCurrentHelper(OnlineSectioningHelper helper) {
    LinkedList<OnlineSectioningHelper> h = sHelper.get();
    if (h == null) {
        h = new LinkedList<OnlineSectioningHelper>();
        sHelper.set(h);
    }
    h.push(helper);
}