Example usage for java.util Queue add

Introduction

This page collects usage examples for the java.util.Queue add method.

Prototype

boolean add(E e);

Document

Inserts the specified element into this queue if it is possible to do so immediately without violating capacity restrictions, returning true upon success and throwing an IllegalStateException if no space is currently available.
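
Before the project examples, here is a minimal self-contained sketch of that contract (illustrative only; the class name QueueAddExample and the choice of a capacity-bounded ArrayBlockingQueue are assumptions for demonstration): add throws IllegalStateException when the bounded queue is full, while offer reports the same condition by returning false.

import java.util.Queue;
import java.util.concurrent.ArrayBlockingQueue;

public class QueueAddExample {
    public static void main(String[] args) {
        // A capacity-bounded queue with room for two elements.
        Queue<String> queue = new ArrayBlockingQueue<>(2);

        System.out.println(queue.add("first"));   // true
        System.out.println(queue.add("second"));  // true
        System.out.println(queue.offer("third")); // false: queue is full, offer just reports it

        try {
            queue.add("third"); // add throws because no space is currently available
        } catch (IllegalStateException e) {
            System.out.println("add failed: " + e.getMessage());
        }
    }
}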

Usage

From source file: org.deeplearning4j.models.word2vec.Word2Vec.java

/**
 * Train the model
 */
public void fit() throws IOException {
    boolean loaded = buildVocab();
    //save vocab after building
    if (!loaded && saveVocab)
        vocab().saveVocab();
    if (stopWords == null)
        readStopWords();

    log.info("Training word2vec multithreaded");

    if (sentenceIter != null)
        sentenceIter.reset();
    if (docIter != null)
        docIter.reset();

    int[] docs = vectorizer.index().allDocs();

    if (docs.length < 1) {
        vectorizer.fit();
    }

    docs = vectorizer.index().allDocs();
    if (docs.length < 1) {
        throw new IllegalStateException("No documents found");
    }

    totalWords = vectorizer.numWordsEncountered();
    if (totalWords < 1)
        throw new IllegalStateException("Unable to train, total words less than 1");

    totalWords *= numIterations;

    log.info("Processing sentences...");

    AtomicLong numWordsSoFar = new AtomicLong(0);
    final AtomicLong nextRandom = new AtomicLong(5);
    ExecutorService exec = new ThreadPoolExecutor(Runtime.getRuntime().availableProcessors(),
            Runtime.getRuntime().availableProcessors(), 0L, TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<Runnable>(), new RejectedExecutionHandler() {
                @Override
                public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
                    try {
                        Thread.sleep(1000);
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                    }
                    executor.submit(r);
                }
            });

    final Queue<List<VocabWord>> batch2 = new ConcurrentLinkedDeque<>();
    vectorizer.index().eachDoc(new Function<List<VocabWord>, Void>() {
        @Override
        public Void apply(List<VocabWord> input) {
            List<VocabWord> batch = new ArrayList<>();
            addWords(input, nextRandom, batch);
            if (!batch.isEmpty()) {
                batch2.add(batch);
            }

            return null;
        }
    }, exec);

    exec.shutdown();
    try {
        exec.awaitTermination(1, TimeUnit.DAYS);
    } catch (InterruptedException e) {
        e.printStackTrace();
    }

    ActorSystem actorSystem = ActorSystem.create();

    for (int i = 0; i < numIterations; i++)
        doIteration(batch2, numWordsSoFar, nextRandom, actorSystem);
    actorSystem.shutdown();

}

From source file: com.streamsets.pipeline.lib.jdbc.multithread.TestMultithreadedTableProvider.java

@Test
public void addTableNotPartitioned() throws InterruptedException, StageException {
    String schema = "db";
    String table1Name = "table1";
    String table2Name = "table2";
    String offsetCol = null;
    final String partitionSize = null;
    int maxActivePartitions = 0;
    int threadNumber = 0;
    int numThreads = 1;

    TableContext table1 = createTableContext(schema, table1Name, offsetCol, partitionSize, maxActivePartitions,
            true);

    MultithreadedTableProvider provider = createTableProvider(numThreads, table1,
            BatchTableStrategy.PROCESS_ALL_AVAILABLE_ROWS_FROM_TABLE);

    TableRuntimeContext tableRuntimeContext = provider.nextTable(threadNumber);
    Assert.assertEquals(table1Name, tableRuntimeContext.getSourceTableContext().getTableName());
    provider.releaseOwnedTable(tableRuntimeContext, threadNumber);

    tableRuntimeContext = provider.nextTable(threadNumber);
    Assert.assertEquals(table1Name, tableRuntimeContext.getSourceTableContext().getTableName());
    provider.releaseOwnedTable(tableRuntimeContext, threadNumber);

    TableContext table2 = createTableContext(schema, table2Name, offsetCol, partitionSize, maxActivePartitions,
            true);
    Map<String, TableContext> tableContextMap = new HashMap<>();

    tableContextMap.put(table1.getQualifiedName(), table1);
    tableContextMap.put(table2.getQualifiedName(), table2);
    Queue<String> sortedTableOrder = new LinkedList<>();
    sortedTableOrder.add(table1.getQualifiedName());
    sortedTableOrder.add(table2.getQualifiedName());

    //Set added table lists
    provider.setTableContextMap(tableContextMap, sortedTableOrder);

    tableRuntimeContext = provider.nextTable(threadNumber);

    Assert.assertEquals(table1Name, tableRuntimeContext.getSourceTableContext().getTableName());
    provider.releaseOwnedTable(tableRuntimeContext, threadNumber);

    tableRuntimeContext = provider.nextTable(threadNumber);
    Assert.assertEquals(table2Name, tableRuntimeContext.getSourceTableContext().getTableName());
    provider.releaseOwnedTable(tableRuntimeContext, threadNumber);
}

From source file: com.thoughtworks.go.server.service.dd.DependencyFanInNode.java

private void addToRevisionQueue(PipelineTimelineEntry entry,
        Queue<PipelineTimelineEntry.Revision> revisionQueue, List<FaninScmMaterial> scmMaterials,
        FanInGraphContext context, Set<CaseInsensitiveString> visitedNodes) {
    for (Map.Entry<String, List<PipelineTimelineEntry.Revision>> revisionList : entry.revisions().entrySet()) {
        String fingerprint = revisionList.getKey();
        PipelineTimelineEntry.Revision revision = revisionList.getValue().get(0);
        if (isScmMaterial(fingerprint, context)) {
            scmMaterials.add(new FaninScmMaterial(fingerprint, revision));
            continue;
        }

        if (isDependencyMaterial(fingerprint, context)
                && !visitedNodes.contains(new CaseInsensitiveString(revision.revision))) {
            revisionQueue.add(revision);
            visitedNodes.add(new CaseInsensitiveString(revision.revision));
        }
    }
}

From source file: edu.snu.leader.hidden.SimulationState.java

/**
 * Signals that the specified individual has canceled a group movement
 *
 * @param individual
 */
public void cancelInitiation(SpatialIndividual individual) {
    if (_LOG.isDebugEnabled()) {
        _LOG.debug("Before cancel [" + individual.getID() + "]: eligibleInitiators=["
                + _eligibleInitiators.size() + "] remaining=[" + _remaining.size() + "] totalFollowers=["
                + individual.getTotalFollowerCount() + "]");
    }

    // Send it a signal so it can log some information
    individual.signalInitiationFailure(this);

    // We need to maintain a list of all the affected individuals
    List<SpatialIndividual> affected = new LinkedList<SpatialIndividual>();

    // Build the list starting with the initiator itself
    Queue<SpatialIndividual> indsToProcess = new LinkedList<SpatialIndividual>();
    indsToProcess.add(individual);
    while (!indsToProcess.isEmpty()) {
        // Get the first in the queue
        SpatialIndividual current = indsToProcess.remove();

        //            _LOG.debug( "Processing ["
        //                    + current.getID()
        //                    + "]" );

        // Add it to the list
        affected.add(current);

        // Add its immediate followers to the queue for processing
        Iterator<Neighbor> followerIter = current.getFollowers().iterator();
        while (followerIter.hasNext()) {
            indsToProcess.add(followerIter.next().getIndividual());
        }
    }

    /* Iterate through all the affected individuals to change them from
     * departed to remaining and tell them to cancel */
    Iterator<SpatialIndividual> affectedIter = affected.iterator();
    while (affectedIter.hasNext()) {
        SpatialIndividual current = affectedIter.next();

        //            _LOG.debug( "Processing affected ["
        //                    + current.getID()
        //                    + "]" );

        // Remove the individual from the departed group
        _departed.remove(current.getID());

        // Add it to the remaining group
        _remaining.put(current.getID(), current);

        // Tell it to cancel
        current.cancel();

    }

    /* Iterate through the list again to see if they are eligible
     * initiators.  We couldn't do it during the last pass through since
     * we hadn't cleaned up all the groups yet. */
    //        affectedIter = affected.iterator();
    affectedIter = _remaining.values().iterator();
    while (affectedIter.hasNext()) {
        SpatialIndividual current = affectedIter.next();

        // Are any of the individual's neighbors initiators or followers?
        boolean eligible = true;
        Iterator<Neighbor> neighborIter = current.getNearestNeighbors().iterator();
        while (eligible && neighborIter.hasNext()) {
            // Can tell by looking at the group ID
            Neighbor neighbor = neighborIter.next();
            if (null != neighbor.getIndividual().getGroupID()) {
                /* The neighbor belongs to a group, the individual is NOT
                 * eligible. */
                eligible = false;
            }
        }

        // Is the individual eligible?
        if (eligible) {
            // Yup
            _eligibleInitiators.put(current.getID(), current);
        } else {
            // Nope, tell them who their first mover was
            // Iterate through the list of departed individuals and
            // find the first nearest neighbor
            Iterator<SpatialIndividual> departedIter = _departed.values().iterator();
            while (departedIter.hasNext()) {
                SpatialIndividual departedInd = departedIter.next();
                if (current.isNearestNeighbor(departedInd)) {
                    current.observeFirstMover(departedInd);
                    break;
                }
            }
        }

        /* Check all the individuals not yet departed to see if they
         * observed this individual as a first mover.  If so, reset their
         * first mover if no other neighbors have departed or if another
         * has departed, set it to that neighbor */
        Iterator<SpatialIndividual> remainingIter = _remaining.values().iterator();
        while (remainingIter.hasNext()) {
            SpatialIndividual currentRemaining = remainingIter.next();
            Neighbor firstMover = currentRemaining.getFirstMover();
            if ((null != firstMover) && (firstMover.getIndividual().getID().equals(current.getID()))) {
                // Reset the first mover
                currentRemaining.resetFirstMover();

                // See if they now have another first mover
                Iterator<SpatialIndividual> departedIter = _departed.values().iterator();
                while (departedIter.hasNext()) {
                    SpatialIndividual departedInd = departedIter.next();
                    if (currentRemaining.isNearestNeighbor(departedInd)) {
                        currentRemaining.observeFirstMover(departedInd);
                        break;
                    }
                }
            }
        }
    }

    _LOG.debug("After cancel: eligibleInitiators=[" + _eligibleInitiators.size() + "] remaining=["
            + _remaining.size() + "]");
}

From source file: com.streamsets.pipeline.lib.jdbc.multithread.TestMultithreadedTableProvider.java

@Test
public void removeTableNotPartitioned() throws InterruptedException, StageException {
    String schema = "db";
    String table1Name = "table1";
    String table2Name = "table2";
    String offsetCol = null;
    final String partitionSize = null;
    int maxActivePartitions = 0;
    int threadNumber = 0;
    int numThreads = 1;

    TableContext table1 = createTableContext(schema, table1Name, offsetCol, partitionSize, maxActivePartitions,
            true);
    TableContext table2 = createTableContext(schema, table2Name, offsetCol, partitionSize, maxActivePartitions,
            true);
    Map<String, TableContext> tableContextMap = new HashMap<>();

    tableContextMap.put(table1.getQualifiedName(), table1);
    tableContextMap.put(table2.getQualifiedName(), table2);
    Queue<String> sortedTableOrder = new LinkedList<>();
    sortedTableOrder.add(table1.getQualifiedName());
    sortedTableOrder.add(table2.getQualifiedName());

    Map threadNumToMaxTableSlots = new HashMap<>();

    BatchTableStrategy batchTableStrategy = BatchTableStrategy.PROCESS_ALL_AVAILABLE_ROWS_FROM_TABLE;
    MultithreadedTableProvider provider = new MultithreadedTableProvider(tableContextMap, sortedTableOrder,
            threadNumToMaxTableSlots, numThreads, batchTableStrategy);

    TableRuntimeContext tableRuntimeContext = provider.nextTable(threadNumber);
    Assert.assertEquals(table1Name, tableRuntimeContext.getSourceTableContext().getTableName());
    provider.releaseOwnedTable(tableRuntimeContext, threadNumber);

    tableRuntimeContext = provider.nextTable(threadNumber);
    Assert.assertEquals(table2Name, tableRuntimeContext.getSourceTableContext().getTableName());
    provider.releaseOwnedTable(tableRuntimeContext, threadNumber);

    tableContextMap.remove(table2.getQualifiedName());
    sortedTableOrder.remove(table2.getQualifiedName());
    //Set removed table lists
    provider.setTableContextMap(tableContextMap, sortedTableOrder);

    tableRuntimeContext = provider.nextTable(threadNumber);

    Assert.assertEquals(table1Name, tableRuntimeContext.getSourceTableContext().getTableName());
    provider.releaseOwnedTable(tableRuntimeContext, threadNumber);

    tableRuntimeContext = provider.nextTable(threadNumber);
    Assert.assertEquals(table1Name, tableRuntimeContext.getSourceTableContext().getTableName());
    provider.releaseOwnedTable(tableRuntimeContext, threadNumber);
}

From source file: com.jaspersoft.jasperserver.war.cascade.token.FilterCore.java

@Override
public LinkedHashSet<String> resolveCascadingOrder(Map<String, Set<String>> masterDependencies) {
    Deque<String> orderedNames = new LinkedList<String>();
    Queue<String> workingQueue = new LinkedList<String>(masterDependencies.keySet());
    int maxIterations = (masterDependencies.size() * (masterDependencies.size() + 1)) / 2 + 1;
    while (workingQueue.size() > 0 && maxIterations-- > 0) {
        String currentName = workingQueue.remove();

        Set<String> masterDependency = masterDependencies.get(currentName);
        if (masterDependency == null || masterDependency.isEmpty()) {
            orderedNames.addFirst(currentName);
        } else {
            if (orderedNames.containsAll(masterDependency)) {
                orderedNames.addLast(currentName);
            } else {
                workingQueue.add(currentName);
            }
        }
    }
    if (maxIterations > 0) {
        return new LinkedHashSet<String>(orderedNames);
    } else {
        throw new JSException("Order cannot be resolved because of circular or non-existing dependencies.");
    }
}

From source file: org.apache.hadoop.hbase.chaos.actions.RollingBatchRestartRsAction.java

@Override
public void perform() throws Exception {
    LOG.info(String.format("Performing action: Rolling batch restarting %d%% of region servers",
            (int) (ratio * 100)));
    List<ServerName> selectedServers = PolicyBasedChaosMonkey.selectRandomItems(getCurrentServers(), ratio);

    Queue<ServerName> serversToBeKilled = new LinkedList<ServerName>(selectedServers);
    Queue<ServerName> deadServers = new LinkedList<ServerName>();

    while (!serversToBeKilled.isEmpty() || !deadServers.isEmpty()) {
        boolean action = true; //action true = kill server, false = start server

        if (serversToBeKilled.isEmpty() || deadServers.isEmpty()) {
            action = deadServers.isEmpty();
        } else {
            action = RandomUtils.nextBoolean();
        }

        if (action) {
            ServerName server = serversToBeKilled.remove();
            try {
                killRs(server);
            } catch (org.apache.hadoop.util.Shell.ExitCodeException e) {
                // We've seen this in test runs where we timeout but the kill went through. HBASE-9743
                // So, add to deadServers even if exception so the start gets called.
                LOG.info("Problem killing but presume successful; code=" + e.getExitCode(), e);
            }
            deadServers.add(server);
        } else {
            try {
                ServerName server = deadServers.remove();
                startRs(server);
            } catch (org.apache.hadoop.util.Shell.ExitCodeException e) {
                // The start may fail but better to just keep going though we may lose server.
                //
                LOG.info("Problem starting, will retry; code=" + e.getExitCode(), e);
            }
        }

        sleep(RandomUtils.nextInt((int) sleepTime));
    }
}

From source file: hudson.plugins.plot.XMLSeries.java

/**
 * This is a fallback strategy for nodesets that include non-numeric content,
 * enabling users to create lists by selecting nodes such that names and
 * values share a common parent. If a node has attributes and is empty, that
 * node will be re-enqueued as a parent to its attributes.
 *
 * @param buildNumber
 *            the build number
 *
 * @return a list of PlotPoints where the label is the last non-numeric
 *         text content and the value is the last numeric text content for
 *         each set of nodes under a given parent.
 */
private List<PlotPoint> coalesceTextnodesAsLabelsStrategy(NodeList nodeList, int buildNumber) {
    Map<Node, List<Node>> parentNodeMap = new HashMap<Node, List<Node>>();

    for (int i = 0; i < nodeList.getLength(); i++) {
        Node node = nodeList.item(i);
        if (!parentNodeMap.containsKey(node.getParentNode())) {
            parentNodeMap.put(node.getParentNode(), new ArrayList<Node>());
        }
        parentNodeMap.get(node.getParentNode()).add(node);
    }

    List<PlotPoint> retval = new ArrayList<PlotPoint>();
    Queue<Node> parents = new ArrayDeque<Node>(parentNodeMap.keySet());
    while (!parents.isEmpty()) {
        Node parent = parents.poll();
        Double value = null;
        String label = null;

        for (Node child : parentNodeMap.get(parent)) {
            if (null == child.getTextContent() || child.getTextContent().trim().isEmpty()) {
                NamedNodeMap attrmap = child.getAttributes();
                List<Node> attrs = new ArrayList<Node>();
                for (int i = 0; i < attrmap.getLength(); i++) {
                    attrs.add(attrmap.item(i));
                }
                parentNodeMap.put(child, attrs);
                parents.add(child);
            } else if (new Scanner(child.getTextContent().trim()).hasNextDouble()) {
                value = new Scanner(child.getTextContent().trim()).nextDouble();
            } else {
                label = child.getTextContent().trim();
            }
        }
        if ((label != null) && (value != null)) {
            addValueToList(retval, new String(label), String.valueOf(value), buildNumber);
        }
    }
    return retval;
}

From source file: candr.yoclip.ParserTest.java

@Test
public void getParsedOption() {
    final ParserOption<ParserTest> mockedOption = createMockOption("o");
    when(mockedOption.hasValue()).thenReturn(true);
    final List<ParserOption<ParserTest>> parserOptions = Arrays.asList(mockedOption);

    final ParserOptions<ParserTest> mockParserOptions = createMockParserParameters("+", "=");
    when(mockParserOptions.get()).thenReturn(parserOptions);
    when(mockParserOptions.get("o")).thenReturn(mockedOption);

    final Queue<String> parameters = new LinkedList<String>();
    final Parser<ParserTest> testCase = new Parser<ParserTest>(mockParserOptions,
            createMockParserHelpFactory());
    assertThat("empty queue", testCase.getParsedOption(parameters), nullValue());

    parameters.add("+o=foobar");
    ParsedOption<ParserTest> parsedOption = testCase.getParsedOption(parameters);
    assertThat("+o error", parsedOption.isError(), is(false));
    assertThat("+o parser option", parsedOption.getParserOption(), is(mockedOption));
    assertThat("+o value", parsedOption.getValue(), is("foobar"));
    assertThat("queue size after parsed parameter", parameters.size(), is(0));

    parameters.add("+o=");
    parsedOption = testCase.getParsedOption(parameters);
    assertThat("+o missing value error", parsedOption.isError(), is(false));
    assertThat("+o missing value parser option", parsedOption.getParserOption(), is(mockedOption));
    assertThat("+o missing value not null", parsedOption.getValue(), nullValue());
    assertThat("queue size after missing value", parameters.size(), is(0));

    parameters.add("+o");
    parsedOption = testCase.getParsedOption(parameters);
    assertThat("+o missing separator error", parsedOption.isError(), is(false));
    assertThat("+o missing separator parser option", parsedOption.getParserOption(), is(mockedOption));
    assertThat("+o missing separator not null", parsedOption.getValue(), nullValue());
    assertThat("queue size after missing separator", parameters.size(), is(0));

    when(mockParserOptions.getSeparator()).thenReturn(" ");
    parameters.add("+o");
    parameters.add("foobar");
    parsedOption = testCase.getParsedOption(parameters);
    assertThat("+o whitespace error", parsedOption.isError(), is(false));
    assertThat("+o whitespace parser option", parsedOption.getParserOption(), is(mockedOption));
    assertThat("+o whitespace value", parsedOption.getValue(), is("foobar"));
    assertThat("+o whitespace queue size", parameters.size(), is(0));

    when(mockParserOptions.getSeparator()).thenReturn(" ");
    parameters.add("+o");
    parsedOption = testCase.getParsedOption(parameters);
    assertThat("+o whitespace missing value error", parsedOption.isError(), is(false));
    assertThat("+o whitespace missing value parser option", parsedOption.getParserOption(), is(mockedOption));
    assertThat("+o whitespace missing value not null", parsedOption.getValue(), nullValue());
    assertThat("+o whitespace queue size after missing value", parameters.size(), is(0));

    when(mockParserOptions.getSeparator()).thenReturn("=");
    when(mockedOption.hasValue()).thenReturn(false);
    parameters.add("+o=value");
    parsedOption = testCase.getParsedOption(parameters);
    assertThat("+o with value error", parsedOption.isError(), is(true));
    assertThat("+o with value parser option", parsedOption.getParserOption(), is(mockedOption));
    assertThat("+o with value error is null", parsedOption.getError(), notNullValue());
    assertThat("+o whitespace queue size after missing value", parameters.size(), is(0));

    parameters.add("+o");
    parsedOption = testCase.getParsedOption(parameters);
    assertThat("+o with boolean", parsedOption.isError(), is(false));
    assertThat("+o with boolean parser option", parsedOption.getParserOption(), is(mockedOption));
    assertThat("+o with boolean value", parsedOption.getValue(), is(Boolean.TRUE.toString()));
    assertThat("+o whitespace queue size after missing value", parameters.size(), is(0));
}

From source file: candr.yoclip.ParserTest.java

@Test
public void getParsedOptionProperty() {
    final ParserOption<ParserTest> mockOptionProperty = createMockOptionProperty("T");
    final List<ParserOption<ParserTest>> parserOptions = Arrays.asList(mockOptionProperty);

    final ParserOptions<ParserTest> mockParserOptions = createMockParserParameters("++");
    when(mockParserOptions.get()).thenReturn(parserOptions);
    when(mockParserOptions.get("T")).thenReturn(mockOptionProperty);

    final Queue<String> parameters = new LinkedList<String>();

    final Parser<ParserTest> testCase = new Parser<ParserTest>(mockParserOptions,
            createMockParserHelpFactory());
    assertThat("empty queue", testCase.getParsedOptionProperty(parameters), nullValue());

    final String expectedArgument = "argument";
    parameters.add(expectedArgument);
    assertThat("parsed parameter with argument", testCase.getParsedOptionProperty(parameters), nullValue());
    assertThat("queue size after parsed parameter error", parameters.size(), is(1));

    parameters.clear();
    parameters.add("++Tfoo=bar");
    final ParsedOption<ParserTest> parsedOption = testCase.getParsedOptionProperty(parameters);
    assertThat("ParsedOption error", parsedOption.isError(), is(false));
    assertThat("parser option", parsedOption.getParserOption(), is(mockOptionProperty));
    assertThat("value", parsedOption.getValue(), is("foo=bar"));
    assertThat("queue size after parsed parameter", parameters.size(), is(0));

    parameters.add("++Dfoo=bar");
    assertThat("property not matched", testCase.getParsedOptionProperty(parameters), nullValue());
}