Example usage for java.util LinkedList peek

List of usage examples for java.util LinkedList peek

Introduction

On this page you can find example usage for java.util.LinkedList.peek().

Prototype

public E peek() 

Document

Retrieves, but does not remove, the head (first element) of this list.
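
A minimal, self-contained sketch of this behavior (not taken from any of the projects below): peek() returns the head without removing it, and returns null rather than throwing when the list is empty, unlike element().

import java.util.LinkedList;

public class PeekDemo {
    public static void main(String[] args) {
        LinkedList<String> list = new LinkedList<String>();

        // peek() on an empty list returns null instead of throwing
        System.out.println(list.peek()); // null

        list.add("first");
        list.add("second");

        // peek() returns the head but leaves it in place
        System.out.println(list.peek()); // first
        System.out.println(list.size()); // still 2

        // element() is the throwing counterpart; on an empty list it
        // would throw NoSuchElementException
        System.out.println(list.element()); // first
    }
}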

Usage

From source file:org.jdto.util.expression.Expression.java

/**
 * Parse the expression into something easily evaluable.
 * @param expression
 * @return 
 */
private synchronized ExpressionTerm parseExpression(String expression) {

    position = 0;

    LinkedList<String> precedenceStack = new LinkedList<String>();

    //add the first imaginary parenthesis.
    precedenceStack.push("(");

    //append a closing parenthesis to the expression.
    expression = expression + ")";

    //the previous token.
    String token = null;

    StringBuilder postFix = new StringBuilder();

    /**
     * Go through the expression.
     */
    while (!precedenceStack.isEmpty() && position < expression.length()) {

        //use the token from previous iteration
        token = readToken(token, expression);

        //if it is a left parenthesis
        if ("(".equals(token)) {
            precedenceStack.push(token);
            postFix.append(" "); //a separation
            continue;
        }

        //check if it is an operator
        Operator operator = Operator.getOperaorByString(token);
        if (operator != null) {
            postFix.append(" "); //add a seprarator char to the result.
            while (operator.precedence(precedenceStack.peek())) {
                postFix.append(precedenceStack.pop());
                postFix.append(" ");
            }
            precedenceStack.push(token);
            continue;
        }

        //check if it is a right parenthesis
        if (")".equals(token)) {
            postFix.append(" "); //add a separator to the result.
            while (!"(".equals(precedenceStack.peek())) {
                String stackElement = precedenceStack.pop();

                if (isOperator(stackElement)) {
                    postFix.append(stackElement);
                    postFix.append(" ");
                }
            }
            //remove the extra parenthesis
            precedenceStack.pop();
            continue;
        }

        //if everything else fails, just add the token to the postfix expr
        postFix.append(token);
        //and we're done with the loop here
    }

    //at this point we need to convert the postfix expression into terms.
    if (!precedenceStack.isEmpty()) {
        throw new IllegalArgumentException("Could not parse expression!");
    }

    return parsePostfixExpr(postFix.toString());
}
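
The parser above uses peek() to look at the operator on top of the precedence stack before deciding whether to pop it, which is the classic shunting-yard pattern. A stripped-down sketch of that pattern with simplified precedence rules and hypothetical helper names (not the jDTO API):

static boolean outranks(String top, String incoming) {
    // '*' and '/' bind more tightly than '+' and '-'; '(' never outranks anything
    int topRank = "*/".contains(top) ? 2 : "+-".contains(top) ? 1 : 0;
    int incomingRank = "*/".contains(incoming) ? 2 : 1;
    return topRank >= incomingRank;
}

static String toPostfix(String[] tokens) {
    LinkedList<String> stack = new LinkedList<String>();
    StringBuilder out = new StringBuilder();
    for (String token : tokens) {
        if ("+-*/".contains(token)) {
            // peek() inspects the top operator without removing it
            while (!stack.isEmpty() && outranks(stack.peek(), token)) {
                out.append(stack.pop()).append(' ');
            }
            stack.push(token);
        } else {
            out.append(token).append(' ');
        }
    }
    while (!stack.isEmpty()) {
        out.append(stack.pop()).append(' ');
    }
    return out.toString().trim();
}

For example, toPostfix(new String[] {"2", "+", "3", "*", "4"}) yields "2 3 4 * +".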

From source file:org.nuxeo.ecm.core.storage.sql.PersistenceContext.java

/**
 * Gets the full path, or the closest parent id which we don't have in
 * cache.
 * <p>
 * If {@code fetch} is {@code true}, returns the full path.
 * <p>
 * If {@code fetch} is {@code false}, does not touch the mapper, only the
 * context, therefore may return a missing parent id instead of the path.
 *
 * @param fetch {@code true} if we can use the database, {@code false} if
 *            only caches should be used
 */
public PathAndId getPathOrMissingParentId(SimpleFragment hierFragment, boolean fetch) throws StorageException {
    LinkedList<String> list = new LinkedList<String>();
    Serializable parentId = null;
    while (true) {
        String name = hierFragment.getString(Model.HIER_CHILD_NAME_KEY);
        if (name == null) {
            // (empty string for normal databases, null for Oracle)
            name = "";
        }
        list.addFirst(name);
        parentId = hierFragment.get(Model.HIER_PARENT_KEY);
        if (parentId == null) {
            // root
            break;
        }
        // recurse in the parent
        RowId rowId = new RowId(Model.HIER_TABLE_NAME, parentId);
        hierFragment = (SimpleFragment) getIfPresent(rowId);
        if (hierFragment == null) {
            // try in mapper cache
            hierFragment = (SimpleFragment) getFromMapper(rowId, false, true);
            if (hierFragment == null) {
                if (!fetch) {
                    return new PathAndId(null, parentId);
                }
                hierFragment = (SimpleFragment) getFromMapper(rowId, true, false);
            }
        }
    }
    String path;
    if (list.size() == 1) {
        String name = list.peek();
        if (name.isEmpty()) {
            // root, special case
            path = "/";
        } else {
            // placeless document, no initial slash
            path = name;
        }
    } else {
        path = StringUtils.join(list, "/");
    }
    return new PathAndId(path, null);
}
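
In this example peek() simply reads the single remaining path segment; unlike getFirst(), it would return null instead of throwing if the list were empty. A small illustration of that difference (standalone sketch, not Nuxeo code):

LinkedList<String> segments = new LinkedList<String>();

// getFirst() would throw NoSuchElementException here:
// segments.getFirst();

// peek() is the null-returning alternative
String head = segments.peek(); // null, no exception

segments.addFirst(""); // the root gets an empty name, as in the snippet above
if (segments.size() == 1 && segments.peek().isEmpty()) {
    System.out.println("/"); // root path, special-cased just like above
}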

From source file:org.opencastproject.videoeditor.impl.VideoEditorServiceImpl.java

private static List<VideoClip> sortSegments(List<VideoClip> edits) {
    LinkedList<VideoClip> ll = new LinkedList<VideoClip>();
    List<VideoClip> clips = new ArrayList<VideoClip>();
    Iterator<VideoClip> it = edits.iterator();
    VideoClip clip;
    VideoClip nextclip;
    while (it.hasNext()) { // Check for legal durations
        clip = it.next();
        if (clip.getDuration() > 2) { // Keep segments at least 2 seconds long
            ll.add(clip);
        }
    }
    clip = ll.pop(); // initialize with the first clip (assumes at least one passed the duration filter)
    while (!ll.isEmpty()) { // Check that 2 consecutive segments from same src are at least 2 secs apart
        if (ll.peek() != null) {
            nextclip = ll.pop(); // check next consecutive segment
            if ((nextclip.getSrc() == clip.getSrc()) && (nextclip.getStart() - clip.getEnd()) < 2) { // collapse two segments into one
                clip.setEnd(nextclip.getEnd()); // by using the inpoint of seg 1 and the outpoint of seg 2
            } else {
                clips.add(clip); // keep last segment
                clip = nextclip; // check next segment
            }
        }
    }
    clips.add(clip); // add last segment
    return clips;
}
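
Here peek() is used to confirm another clip is still queued before pop() consumes it, so consecutive clips from the same source can be merged pairwise. A defensive sketch of the same consume-and-merge loop, reduced to plain int[] {start, end} intervals and assuming java.util imports (illustrative only, not the Opencast implementation):

static List<int[]> mergeClose(LinkedList<int[]> sorted) {
    List<int[]> result = new ArrayList<int[]>();
    if (sorted.isEmpty()) {
        return result; // nothing survived the filter
    }
    int[] current = sorted.pop();
    while (sorted.peek() != null) { // peek() == null means the list is exhausted
        int[] next = sorted.pop();
        if (next[0] - current[1] < 2) {
            current[1] = next[1]; // collapse: keep start of current, end of next
        } else {
            result.add(current);
            current = next;
        }
    }
    result.add(current);
    return result;
}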

From source file:org.opencb.opencga.storage.hadoop.variant.HadoopVariantStorageEngine.java

@Override
public List<StoragePipelineResult> index(List<URI> inputFiles, URI outdirUri, boolean doExtract,
        boolean doTransform, boolean doLoad) throws StorageEngineException {

    if (inputFiles.size() == 1 || !doLoad) {
        return super.index(inputFiles, outdirUri, doExtract, doTransform, doLoad);
    }

    final boolean doArchive;
    final boolean doMerge;

    if (!getOptions().containsKey(HADOOP_LOAD_ARCHIVE) && !getOptions().containsKey(HADOOP_LOAD_VARIANT)) {
        doArchive = true;
        doMerge = true;
    } else {
        doArchive = getOptions().getBoolean(HADOOP_LOAD_ARCHIVE, false);
        doMerge = getOptions().getBoolean(HADOOP_LOAD_VARIANT, false);
    }

    if (!doArchive && !doMerge) {
        return Collections.emptyList();
    }

    final int nThreadArchive = getOptions().getInt(HADOOP_LOAD_ARCHIVE_BATCH_SIZE, 2);
    ObjectMap extraOptions = new ObjectMap().append(HADOOP_LOAD_ARCHIVE, true).append(HADOOP_LOAD_VARIANT,
            false);

    final List<StoragePipelineResult> concurrResult = new CopyOnWriteArrayList<>();
    List<VariantStoragePipeline> etlList = new ArrayList<>();
    ExecutorService executorService = Executors.newFixedThreadPool(nThreadArchive, r -> {
        Thread t = new Thread(r);
        t.setDaemon(true);
        return t;
    }); // Set Daemon for quick shutdown !!!
    LinkedList<Future<StoragePipelineResult>> futures = new LinkedList<>();
    List<Integer> indexedFiles = new CopyOnWriteArrayList<>();
    for (URI inputFile : inputFiles) {
        //Provide a connected storageETL if load is required.

        VariantStoragePipeline storageETL = newStorageETL(doLoad, new ObjectMap(extraOptions));
        futures.add(executorService.submit(() -> {
            try {
                Thread.currentThread().setName(Paths.get(inputFile).getFileName().toString());
                StoragePipelineResult storagePipelineResult = new StoragePipelineResult(inputFile);
                URI nextUri = inputFile;
                boolean error = false;
                if (doTransform) {
                    try {
                        nextUri = transformFile(storageETL, storagePipelineResult, concurrResult, nextUri,
                                outdirUri);

                    } catch (StoragePipelineException ignore) {
                        //Ignore here. Errors are stored in the ETLResult
                        error = true;
                    }
                }

                if (doLoad && doArchive && !error) {
                    try {
                        loadFile(storageETL, storagePipelineResult, concurrResult, nextUri, outdirUri);
                    } catch (StoragePipelineException ignore) {
                        //Ignore here. Errors are stored in the ETLResult
                        error = true;
                    }
                }
                if (doLoad && !error) {
                    // Read the VariantSource to get the original fileName (it may be different from the
                    // nextUri.getFileName if this is the transformed file)
                    String fileName = storageETL.readVariantSource(nextUri, null).getFileName();
                    // Get latest study configuration from DB, might have been changed since
                    StudyConfiguration studyConfiguration = storageETL.getStudyConfiguration();
                    // Get file ID for the provided file name
                    Integer fileId = studyConfiguration.getFileIds().get(fileName);
                    indexedFiles.add(fileId);
                }
                return storagePipelineResult;
            } finally {
                try {
                    storageETL.close();
                } catch (StorageEngineException e) {
                    logger.error("Issue closing DB connection ", e);
                }
            }
        }));
    }

    executorService.shutdown();

    int errors = 0;
    try {
        while (!futures.isEmpty()) {
            executorService.awaitTermination(1, TimeUnit.MINUTES);
            // Check values
            if (futures.peek().isDone() || futures.peek().isCancelled()) {
                Future<StoragePipelineResult> first = futures.pop();
                StoragePipelineResult result = first.get(1, TimeUnit.MINUTES);
                if (result.getTransformError() != null) {
                    //TODO: Handle errors. Retry?
                    errors++;
                    result.getTransformError().printStackTrace();
                } else if (result.getLoadError() != null) {
                    //TODO: Handle errors. Retry?
                    errors++;
                    result.getLoadError().printStackTrace();
                }
                concurrResult.add(result);
            }
        }
        if (errors > 0) {
            throw new StoragePipelineException("Errors found", concurrResult);
        }

        if (doLoad && doMerge) {
            int batchMergeSize = getOptions().getInt(HADOOP_LOAD_VARIANT_BATCH_SIZE, 10);
            // Overwrite default ID list with user provided IDs
            List<Integer> pendingFiles = indexedFiles;
            if (getOptions().containsKey(HADOOP_LOAD_VARIANT_PENDING_FILES)) {
                List<Integer> idList = getOptions().getAsIntegerList(HADOOP_LOAD_VARIANT_PENDING_FILES);
                if (!idList.isEmpty()) {
                    // only if the list is not empty
                    pendingFiles = idList;
                }
            }

            List<Integer> filesToMerge = new ArrayList<>(batchMergeSize);
            int i = 0;
            for (Iterator<Integer> iterator = pendingFiles.iterator(); iterator.hasNext(); i++) {
                Integer indexedFile = iterator.next();
                filesToMerge.add(indexedFile);
                if (filesToMerge.size() == batchMergeSize || !iterator.hasNext()) {
                    extraOptions = new ObjectMap().append(HADOOP_LOAD_ARCHIVE, false)
                            .append(HADOOP_LOAD_VARIANT, true)
                            .append(HADOOP_LOAD_VARIANT_PENDING_FILES, filesToMerge);

                    AbstractHadoopVariantStoragePipeline localEtl = newStorageETL(doLoad, extraOptions);

                    int studyId = getOptions().getInt(Options.STUDY_ID.key());
                    localEtl.preLoad(inputFiles.get(i), outdirUri);
                    localEtl.merge(studyId, filesToMerge);
                    localEtl.postLoad(inputFiles.get(i), outdirUri);
                    filesToMerge.clear();
                }
            }

            annotateLoadedFiles(outdirUri, inputFiles, concurrResult, getOptions());
            calculateStatsForLoadedFiles(outdirUri, inputFiles, concurrResult, getOptions());

        }
    } catch (InterruptedException e) {
        Thread.interrupted();
        throw new StoragePipelineException("Interrupted!", e, concurrResult);
    } catch (ExecutionException e) {
        throw new StoragePipelineException("Execution exception!", e, concurrResult);
    } catch (TimeoutException e) {
        throw new StoragePipelineException("Timeout Exception", e, concurrResult);
    } finally {
        if (!executorService.isShutdown()) {
            try {
                executorService.shutdownNow();
            } catch (Exception e) {
                logger.error("Problems shutting executer service down", e);
            }
        }
    }
    return concurrResult;
}
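
The polling loop above peek()s the head of the futures queue and only pop()s it once it has finished, so results are collected in submission order. A condensed, runnable sketch of that pattern with a toy task (hypothetical example, not the OpenCGA code):

import java.util.LinkedList;
import java.util.concurrent.*;

public class PeekFutures {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        LinkedList<Future<Integer>> futures = new LinkedList<Future<Integer>>();
        for (int i = 0; i < 4; i++) {
            final int n = i;
            futures.add(pool.submit(() -> n * n));
        }
        pool.shutdown();

        while (!futures.isEmpty()) {
            pool.awaitTermination(50, TimeUnit.MILLISECONDS);
            // inspect the oldest submitted task without removing it;
            // pop it only once it has completed or been cancelled
            if (futures.peek().isDone() || futures.peek().isCancelled()) {
                System.out.println(futures.pop().get());
            }
        }
    }
}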

From source file:org.seasr.meandre.components.analytics.socialnetworking.AbstractLinkCreationComponent.java

@Override
public void executeCallBack(ComponentContext cc) throws Exception {
    Strings inMetaTuple = (Strings) cc.getDataComponentFromInput(IN_META_TUPLE);
    SimpleTuplePeer tuplePeer = new SimpleTuplePeer(inMetaTuple);
    console.fine("Input meta tuple: " + tuplePeer.toString());

    StringsArray inTuples = (StringsArray) cc.getDataComponentFromInput(IN_TUPLES);
    Strings[] tuples = BasicDataTypesTools.stringsArrayToJavaArray(inTuples);

    int SENTENCE_ID_IDX = tuplePeer.getIndexForFieldName(OpenNLPNamedEntity.SENTENCE_ID_FIELD);
    int TYPE_IDX = tuplePeer.getIndexForFieldName(OpenNLPNamedEntity.TYPE_FIELD);
    int TEXT_IDX = tuplePeer.getIndexForFieldName(OpenNLPNamedEntity.TEXT_FIELD);

    // Linked list of sentences keyed by sentence id - the HashSet is the set of entities in that sentence
    LinkedList<KeyValuePair<Integer, HashSet<Entity>>> _sentencesWindow = new LinkedList<KeyValuePair<Integer, HashSet<Entity>>>();

    // Note: The algorithm used to mark entities as adjacent if they fall within the specified sentence distance
    //       relies on a sliding-window of sentences that are within the 'adjacency' range. As new sentences are
    //       considered, the window moves to the right and old sentences that are now too far fall out of scope.

    SimpleTuple tuple = tuplePeer.createTuple();
    for (Strings t : tuples) {
        tuple.setValues(t);

        Integer sentenceId = Integer.parseInt(tuple.getValue(SENTENCE_ID_IDX));
        String tupleType = tuple.getValue(TYPE_IDX);
        String tupleValue = tuple.getValue(TEXT_IDX);

        // If the entity is of the type we're interested in
        if (_entityTypes.contains(tupleType)) {

            if (_normalizeEntities) {
                // Normalize whitespaces
                StringBuilder sb = new StringBuilder();
                Matcher nonWhitespaceMatcher = REGEXP_NONWHITESPACE.matcher(tupleValue);
                while (nonWhitespaceMatcher.find())
                    sb.append(" ").append(nonWhitespaceMatcher.group(1));

                if (sb.length() > 0)
                    tupleValue = sb.substring(1);
                else
                    continue;

                // Normalize people's names
                if (tupleType.toLowerCase().equals("person")) {
                    sb = new StringBuilder();
                    Matcher personMatcher = REGEXP_PERSON.matcher(tupleValue);
                    while (personMatcher.find())
                        sb.append(" ").append(personMatcher.group(1));

                    if (sb.length() > 0)
                        tupleValue = sb.substring(1);
                    else
                        continue;

                    // ignore names with 1 character
                    if (tupleValue.length() == 1)
                        continue;
                }

                tupleValue = WordUtils.capitalizeFully(tupleValue);
            }

            // ... create an object for it
            Entity entity = new Entity(tupleType, tupleValue);

            // Check if we already recorded this entity before
            Entity oldEntity = _entities.get(entity);
            if (oldEntity == null)
                // If not, record it
                _entities.put(entity, entity);
            else
                // Otherwise retrieve the entity we used before
                entity = oldEntity;

            HashSet<Entity> sentenceEntities;

            // Remove all sentences (together with any entities they contained) from the set
            // of sentences that are too far from the current sentence of this entity
            while (_sentencesWindow.size() > 0 && sentenceId - _sentencesWindow.peek().getKey() > _offset)
                _sentencesWindow.remove();

            if (_sentencesWindow.size() > 0) {
                // If this sentence is different from the last sentence in the window
                if (_sentencesWindow.getLast().getKey() != sentenceId) {
                    // Create an entry for it and add it at the end of the window
                    sentenceEntities = new HashSet<Entity>();
                    _sentencesWindow
                            .addLast(new KeyValuePair<Integer, HashSet<Entity>>(sentenceId, sentenceEntities));
                } else
                    sentenceEntities = _sentencesWindow.getLast().getValue();
            } else {
                // If there are no sentences in the window, create an entry for this sentence and add it
                sentenceEntities = new HashSet<Entity>();
                _sentencesWindow
                        .addLast(new KeyValuePair<Integer, HashSet<Entity>>(sentenceId, sentenceEntities));
            }

            // Iterate through all the sentences in the window
            for (KeyValuePair<Integer, HashSet<Entity>> kvp : _sentencesWindow)
                // ... and all the entities in each sentence
                for (Entity e : kvp.getValue()) {
                    // ignore self-references
                    if (e.equals(entity))
                        continue;

                    // ... and mark the new entity as being adjacent to all the entities in the window
                    e.addOutwardLink(entity);
                    entity.addInwardLink(e);
                }

            // Add the new entity to the window
            sentenceEntities.add(entity);
        }
    }

    if (!_isStreaming)
        generateAndPushOutputInternal();
}
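
In this component peek() looks at the oldest sentence in the sliding window so that sentences falling outside the adjacency range can be evicted from the front. A cut-down sketch of that eviction step, reduced to bare sentence ids (illustrative only, not the SEASR code):

LinkedList<Integer> window = new LinkedList<Integer>(); // oldest sentence id at the head
int offset = 2; // maximum allowed sentence distance

for (int sentenceId : new int[] { 1, 2, 3, 7, 8 }) {
    // evict ids that are now too far from the current sentence;
    // peek() reads the oldest id, remove() drops it from the head
    while (!window.isEmpty() && sentenceId - window.peek() > offset) {
        window.remove();
    }
    window.addLast(sentenceId);
    System.out.println("sentence " + sentenceId + " -> window " + window);
}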

From source file:org.unitime.timetable.onlinesectioning.server.AbstractServer.java

protected OnlineSectioningHelper getCurrentHelper() {
    LinkedList<OnlineSectioningHelper> h = sHelper.get();
    if (h == null || h.isEmpty())
        return new OnlineSectioningHelper(null);
    return h.peek();
}