Example usage for java.util.Deque.size()

List of usage examples for java.util.Deque.size()

Introduction

On this page you can find example usages for java.util.Deque.size().

Prototype

int size();

Document

Returns the number of elements in this deque.
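
Before the real-world examples below, here is a minimal, self-contained sketch of the call (hypothetical class name and values, not taken from any of the source files listed):

import java.util.ArrayDeque;
import java.util.Deque;

public class DequeSizeDemo {
    public static void main(String[] args) {
        Deque<String> deque = new ArrayDeque<>();
        deque.push("first");       // deque: [first]
        deque.addLast("second");   // deque: [first, second]

        // size() returns the number of elements currently in the deque
        System.out.println(deque.size());  // prints 2

        deque.pop();                        // removes "first" from the head
        System.out.println(deque.size());  // prints 1
    }
}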

Usage

From source file:bb.mcmc.analysis.GewekeConvergeStat.java

@Override
protected double calculateEachProgress(Double stat, Deque<Double> record) {

    if (!Double.isNaN(stat)) {
        if (record.size() > 2) {
            record.pop();
        }
        record.add(stat);
    }
    double avgStat = 0;
    for (double d : record) {
        avgStat += d;
    }
    avgStat /= record.size();

    //      final double progress = Math.exp( rafteryThreshold - avgStat );

    //      return progress;

    final double progress = (1 - nd.cumulativeProbability(Math.abs(avgStat))) / gewekeProgressThreshold;
    //         final double tempP = (1-nd.cumulativeProbability(Math.abs(gewekeStat)-gewekeThreshold))/0.5;
    //         R Code
    //         data<- seq(1.96,4,by=0.01)
    //         plot(data, 1-(pnorm(abs(data))-pnorm(1.96))/0.025, type="l", col=2)
    //         plot(data, (1-pnorm(data-1.96))/0.5, type="l", col=2)

    return progress;
}

From source file:com.spotify.helios.agent.QueueingHistoryWriter.java

public QueueingHistoryWriter(final String hostname, final ZooKeeperClient client, final Path backingFile)
        throws IOException, InterruptedException {
    this.hostname = hostname;
    this.client = client;
    this.backingStore = PersistentAtomicReference.create(backingFile,
            new TypeReference<ConcurrentMap<JobId, Deque<TaskStatusEvent>>>() {
            }, new Supplier<ConcurrentMap<JobId, Deque<TaskStatusEvent>>>() {
                @Override
                public ConcurrentMap<JobId, Deque<TaskStatusEvent>> get() {
                    return Maps.newConcurrentMap();
                }
            });
    this.items = backingStore.get();

    // Clean out any errant null values.  Normally shouldn't have any, but we did have a few
    // where it happened, and this will make sure we can get out of a bad state if we get into it.
    final ImmutableSet<JobId> curKeys = ImmutableSet.copyOf(this.items.keySet());
    for (JobId key : curKeys) {
        if (this.items.get(key) == null) {
            this.items.remove(key);
        }
    }

    int itemCount = 0;
    for (Deque<TaskStatusEvent> deque : items.values()) {
        itemCount += deque.size();
    }
    this.count = new AtomicInteger(itemCount);
}

From source file:com.spotify.helios.agent.TaskHistoryWriter.java

public TaskHistoryWriter(final String hostname, final ZooKeeperClient client, final Path backingFile)
        throws IOException, InterruptedException {
    this.hostname = hostname;
    this.client = client;
    this.backingStore = PersistentAtomicReference.create(backingFile,
            new TypeReference<ConcurrentMap<JobId, Deque<TaskStatusEvent>>>() {
            }, new Supplier<ConcurrentMap<JobId, Deque<TaskStatusEvent>>>() {
                @Override
                public ConcurrentMap<JobId, Deque<TaskStatusEvent>> get() {
                    return Maps.newConcurrentMap();
                }
            });
    this.items = backingStore.get();

    // Clean out any errant null values.  Normally shouldn't have any, but we did have a few
    // where it happened, and this will make sure we can get out of a bad state if we get into it.
    final ImmutableSet<JobId> curKeys = ImmutableSet.copyOf(this.items.keySet());
    for (final JobId key : curKeys) {
        if (this.items.get(key) == null) {
            this.items.remove(key);
        }
    }

    int itemCount = 0;
    for (final Deque<TaskStatusEvent> deque : items.values()) {
        itemCount += deque.size();
    }
    this.count = new AtomicInteger(itemCount);
}

From source file:com.spotify.helios.agent.QueueingHistoryWriter.java

private void putBack(TaskStatusEvent event) {
    final JobId key = event.getStatus().getJob().getId();
    final Deque<TaskStatusEvent> queue = getDeque(key);
    synchronized (queue) {
        if (queue.size() >= MAX_QUEUE_SIZE) {
            // already full, just toss the event
            return;
        }
        queue.push(event);
        count.incrementAndGet();
    }
}

From source file:io.cloudslang.lang.compiler.CompileAsyncLoopFlowTest.java

@Test
public void testPreCompileAsyncLoopFlowNavigate() throws Exception {
    Deque<Task> tasks = getTasksAfterPrecompileFlow("/loops/async_loop/async_loop_navigate.sl");
    assertEquals(2, tasks.size());

    Task asyncTask = tasks.getFirst();

    verifyAsyncLoopStatement(asyncTask);

    List<Output> aggregateValues = getAggregateOutputs(asyncTask);
    assertEquals(0, aggregateValues.size());

    List<Output> publishValues = getPublishOutputs(asyncTask);
    assertEquals("aggregate list is not empty", 0, publishValues.size());

    Map<String, String> expectedNavigationStrings = new HashMap<>();
    expectedNavigationStrings.put("SUCCESS", "print_list");
    expectedNavigationStrings.put("FAILURE", "FAILURE");
    verifyNavigationStrings(expectedNavigationStrings, asyncTask);

    assertTrue(asyncTask.isAsync());
}

From source file:io.cloudslang.lang.compiler.CompileAsyncLoopFlowTest.java

@Test
public void testPreCompileAsyncLoopFlowAggregateNavigate() throws Exception {
    Deque<Task> tasks = getTasksAfterPrecompileFlow("/loops/async_loop/async_loop_aggregate_navigate.sl");
    assertEquals(2, tasks.size());

    Task asyncTask = tasks.getFirst();

    verifyAsyncLoopStatement(asyncTask);

    List<Output> aggregateValues = getAggregateOutputs(asyncTask);
    assertEquals(2, aggregateValues.size());
    assertEquals("${ map(lambda x:str(x['name']), branches_context) }", aggregateValues.get(0).getValue());

    List<Output> publishValues = getPublishOutputs(asyncTask);
    assertEquals("aggregate list is not empty", 2, publishValues.size());
    assertEquals("${name}", publishValues.get(0).getValue());

    Map<String, String> expectedNavigationStrings = new HashMap<>();
    expectedNavigationStrings.put("SUCCESS", "print_list");
    expectedNavigationStrings.put("FAILURE", "FAILURE");
    verifyNavigationStrings(expectedNavigationStrings, asyncTask);

    assertTrue(asyncTask.isAsync());
}

From source file:com.ebay.pulsar.metric.processor.MetricProcessor.java

@Override
public void sendEvent(JetstreamEvent event) throws EventException {
    incrementEventRecievedCounter();
    String eventType = event.getEventType();

    Deque<DataPoint> dataByType = dataBuffer.get(eventType);
    if (dataByType == null) {
        dataByType = new ConcurrentLinkedDeque<DataPoint>();
        dataBuffer.put(eventType, dataByType);
    }

    Long currentTimestamp = System.currentTimeMillis();
    boolean isNewBatchOfEvents = false;

    if (dataByType.size() > 0
            && event.get("context_id") != dataByType.getLast().getEvents().peek().get("context_id"))
        isNewBatchOfEvents = true;

    // Flush old batches
    if (isNewBatchOfEvents) {
        broadcastEventsByType(eventType, dataByType);
        incrementEventSentCounter();
    }

    if (dataByType.size() == 0 || isNewBatchOfEvents) {
        DataPoint dataPoint = new DataPoint(currentTimestamp);
        dataByType.add(dataPoint);
    }

    dataByType.getLast().addEvent(event);
    int maxLength = GENERIC_MAX_POINT;
    if (eventType.equalsIgnoreCase("MC_Metric") || eventType.equalsIgnoreCase("TwitterEventCount")) {
        maxLength = PAGE_VIEWS_POINT;
    }
    if (dataByType.size() > maxLength) {
        dataByType.removeFirst();
    }
}

From source file:com.spotify.helios.agent.QueueingHistoryWriter.java

private void add(TaskStatusEvent item) throws InterruptedException {
    // If too many "globally", toss them
    while (count.get() >= MAX_TOTAL_SIZE) {
        getNext();
    }

    final JobId key = item.getStatus().getJob().getId();
    final Deque<TaskStatusEvent> deque = getDeque(key);

    synchronized (deque) {
        // if too many in the particular deque, toss them
        while (deque.size() >= MAX_QUEUE_SIZE) {
            deque.remove();
            count.decrementAndGet();
        }
        deque.add(item);
        count.incrementAndGet();
    }

    try {
        backingStore.set(items);
    } catch (ClosedByInterruptException e) {
        log.debug("Writing task status event to backing store was interrupted");
    } catch (IOException e) { // We are best effort after all...
        log.warn("Failed to write task status event to backing store", e);
    }
}

From source file:com.roche.sequencing.bioinformatics.common.utils.FileUtil.java

/**
 * taken from here http://codereview.stackexchange.com/questions/47923/simplifying-a-path -Kurt Heilman
 *
 * @param path
 * @return
 */
public static String simplifyPath(String path) {
    String simplifiedPath = null;
    Deque<String> pathDeterminer = new ArrayDeque<String>();
    path = path.replaceAll(Pattern.quote("\\"), "/");
    path = path.replaceAll(Pattern.quote("\\\\"), "/");
    String[] pathSplitter = path.split("/");
    StringBuilder absolutePath = new StringBuilder();
    for (String term : pathSplitter) {
        if (term == null || term.length() == 0 || term.equals(".")) {
            /* ignore these guys */
        } else if (term.equals("..")) {
            if (pathDeterminer.size() > 0) {
                pathDeterminer.removeLast();
            }
        } else {
            pathDeterminer.addLast(term);
        }
    }
    if (pathDeterminer.isEmpty()) {
        simplifiedPath = "/";
    } else {
        while (!pathDeterminer.isEmpty()) {
            absolutePath.insert(0, pathDeterminer.removeLast());
            absolutePath.insert(0, "/");
        }
        simplifiedPath = absolutePath.toString();
    }
    return simplifiedPath;
}

From source file:io.anserini.index.IndexWebCollection.java

public int indexWithThreads(int numThreads) throws IOException, InterruptedException {

    LOG.info("Indexing with " + numThreads + " threads to directory '" + indexPath.toAbsolutePath() + "'...");

    final Directory dir = FSDirectory.open(indexPath);

    final IndexWriterConfig iwc = new IndexWriterConfig(new EnglishAnalyzer());

    iwc.setSimilarity(new BM25Similarity());
    iwc.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
    iwc.setRAMBufferSizeMB(512);
    iwc.setUseCompoundFile(false);
    iwc.setMergeScheduler(new ConcurrentMergeScheduler());

    final IndexWriter writer = new IndexWriter(dir, iwc);

    final ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(numThreads);
    final String suffix = Collection.GOV2.equals(collection) ? ".gz" : ".warc.gz";
    final Deque<Path> warcFiles = discoverWarcFiles(docDir, suffix);

    if (doclimit > 0 && warcFiles.size() < doclimit)
        for (int i = doclimit; i < warcFiles.size(); i++)
            warcFiles.removeFirst();

    long totalWarcFiles = warcFiles.size();
    LOG.info(totalWarcFiles + " many " + suffix + " files found under the docs path : " + docDir.toString());

    for (int i = 0; i < 2000; i++) {
        if (!warcFiles.isEmpty())
            executor.execute(new IndexerThread(writer, warcFiles.removeFirst()));
        else {
            if (!executor.isShutdown()) {
                Thread.sleep(30000);
                executor.shutdown();
            }
            break;
        }
    }

    long first = 0;
    // add a short delay to let the scheduler spawn some worker threads
    Thread.sleep(30000);

    try {
        // Wait for existing tasks to terminate
        while (!executor.awaitTermination(1, TimeUnit.MINUTES)) {

            final long completedTaskCount = executor.getCompletedTaskCount();

            LOG.info(String.format("%.2f percentage completed",
                    (double) completedTaskCount / totalWarcFiles * 100.0d));

            if (!warcFiles.isEmpty())
                for (long i = first; i < completedTaskCount; i++) {
                    if (!warcFiles.isEmpty())
                        executor.execute(new IndexerThread(writer, warcFiles.removeFirst()));
                    else {
                        if (!executor.isShutdown())
                            executor.shutdown();
                    }
                }

            first = completedTaskCount;
            Thread.sleep(1000);
        }
    } catch (InterruptedException ie) {
        // (Re-)Cancel if current thread also interrupted
        executor.shutdownNow();
        // Preserve interrupt status
        Thread.currentThread().interrupt();
    }

    if (totalWarcFiles != executor.getCompletedTaskCount())
        throw new RuntimeException("totalWarcFiles = " + totalWarcFiles
                + " is not equal to completedTaskCount =  " + executor.getCompletedTaskCount());

    int numIndexed = writer.maxDoc();

    try {
        writer.commit();
        if (optimize)
            writer.forceMerge(1);
    } finally {
        writer.close();
    }

    return numIndexed;
}