Example usage for java.util.ArrayDeque ArrayDeque()

Introduction

This page shows example usages of the java.util.ArrayDeque no-argument constructor, ArrayDeque().

Prototype

public ArrayDeque() 

Document

Constructs an empty array deque with an initial capacity sufficient to hold 16 elements.
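For orientation, here is a minimal, self-contained sketch (not taken from any of the source files listed under Usage below) showing the no-argument constructor and the resulting deque used both as a FIFO queue and as a LIFO stack:

import java.util.ArrayDeque;

public class ArrayDequeConstructorDemo {
    public static void main(String[] args) {
        // Empty deque; the backing array starts with room for 16 elements and grows as needed.
        ArrayDeque<String> deque = new ArrayDeque<>();

        // FIFO use: add at the tail, remove from the head.
        deque.offer("first");
        deque.offer("second");
        System.out.println(deque.poll()); // prints "first"

        // LIFO use: push and pop at the head.
        deque.push("top");
        System.out.println(deque.pop()); // prints "top"

        // Null elements are rejected, as the test cases below also demonstrate.
        try {
            deque.offer(null);
        } catch (NullPointerException expected) {
            System.out.println("null is not allowed");
        }
    }
}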

Usage

From source file:com.google.gwt.emultest.java.util.ArrayDequeTest.java

public void testOffer() {
    Object o1 = new Object();
    Object o2 = new Object();
    Object o3 = new Object();

    ArrayDeque<Object> deque = new ArrayDeque<>();
    assertTrue(deque.offer(o1));
    checkDequeSizeAndContent(deque, o1);
    assertTrue(deque.offer(o2));
    checkDequeSizeAndContent(deque, o1, o2);
    assertTrue(deque.offer(o3));
    checkDequeSizeAndContent(deque, o1, o2, o3);

    try {
        deque.offer(null);
        fail();
    } catch (NullPointerException expected) {
    }
}

From source file:WorkQueue.java

/**
 * Creates a thread that continuously dequeues {@code threadLocalItems} items from
 * {@code workQueue} at once and executes them sequentially.
 *
 * @param threadLocalItems the number of items this thread should dequeue
 *        from the work queue at one time.  Setting this value too high can
 *        result in a loss of concurrency; setting it too low can result in
 *        high contention on the work queue if the time per task is also
 *        low.
 */
public WorkerThread(BlockingQueue<Runnable> workQueue, int threadLocalItems) {
    this.workQueue = workQueue;
    this.threadLocalItems = threadLocalItems;
    internalQueue = new ArrayDeque<Runnable>();
    setDaemon(true);
    synchronized (WorkerThread.class) {
        setName("WorkerThread-" + (threadInstanceCount++));
    }
}
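The javadoc above describes dequeuing tasks in thread-local batches. As a hedged illustration only (this sketch is not part of the original WorkQueue.java; it assumes the fields set up in the constructor above), such a worker's run loop might use BlockingQueue.drainTo to pull up to threadLocalItems tasks into the internal ArrayDeque in one call:

@Override
public void run() {
    while (true) {
        try {
            // Block until at least one task arrives, then grab up to
            // threadLocalItems - 1 more without blocking.
            internalQueue.add(workQueue.take());
            workQueue.drainTo(internalQueue, threadLocalItems - 1);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            return;
        }
        // Execute the locally dequeued tasks sequentially.
        Runnable task;
        while ((task = internalQueue.poll()) != null) {
            task.run();
        }
    }
}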

From source file:org.lilyproject.repository.impl.HBaseTypeManager.java

@Override
public RecordType updateRecordType(RecordType recordType, boolean refreshSubtypes)
        throws RepositoryException, InterruptedException {
    return updateRecordType(recordType, refreshSubtypes, new ArrayDeque<SchemaId>());
}

From source file:org.shaman.terrain.polygonal.GraphToHeightmap.java

private void calculateBaseElevation() {
    //assign elevation to oceans
    for (Graph.Corner c : graph.corners) {
        if (c.ocean) {
            c.elevation = -1;
        }
    }
    Queue<Graph.Corner> q = new ArrayDeque<>();
    for (Graph.Corner c : graph.corners) {
        if (c.coast) {
            q.add(c);
        }
    }
    while (!q.isEmpty()) {
        Graph.Corner c = q.poll();
        for (Graph.Corner r : c.adjacent) {
            float h = Math.max(-1, c.elevation - 0.2f);
            if (r.ocean && r.elevation < h) {
                r.elevation = h;
                q.add(r);
            }
        }
    }
    assignCenterElevations();
    //render
    Geometry geom = createElevationGeometry();
    Heightmap tmp = new Heightmap(size);
    render(tmp.getRawData(), geom, ColorRGBA.Black, -1, 1);
    //scale
    for (int x = 0; x < size; ++x) {
        for (int y = 0; y < size; ++y) {
            float h = tmp.getHeightAt(x, y);
            h = (float) (Math.signum(h) * Math.pow(Math.abs(h), HEIGHT_SCALING));
            tmp.setHeightAt(x, y, h);
        }
    }
    //distort
    Noise distortionNoise = new Noise(rand.nextLong());
    for (int x = 0; x < size; ++x) {
        for (int y = 0; y < size; ++y) {
            float s = x / (float) size;
            float t = y / (float) size;
            float ss = (float) (s + DISTORTION_AMPLITUDE * 2
                    * distortionNoise.noise(s * DISTORTION_FREQUENCY, t * DISTORTION_FREQUENCY, 0));
            float tt = (float) (t + DISTORTION_AMPLITUDE * 2
                    * distortionNoise.noise(s * DISTORTION_FREQUENCY, t * DISTORTION_FREQUENCY, 3.4));
            float v = tmp.getHeightInterpolating(ss * size, tt * size);
            heightmap.setHeightAt(x, y, v);
        }
    }
    //smooth
    for (int i = 0; i < SMOOTHING_STEPS; ++i) {
        smooth(heightmap);
    }
    //reset height
    for (Graph.Corner c : graph.corners) {
        if (c.ocean) {
            c.elevation = 0;
        }
    }
    assignCenterElevations();
    LOG.info("base elevation assigned");
}

From source file:com.github.braully.graph.hn.GraphWS.java

public boolean checkIfHullSet(UndirectedSparseGraphTO<Integer, Integer> graph, int[] currentSet) {
    if (currentSet == null || currentSet.length == 0) {
        return false;
    }
    Set<Integer> fecho = new HashSet<>();
    Collection vertices = graph.getVertices();
    int[] aux = new int[graph.getVertexCount()];
    for (int i = 0; i < aux.length; i++) {
        aux[i] = 0;
    }

    Queue<Integer> mustBeIncluded = new ArrayDeque<>();
    for (Integer v : currentSet) {
        mustBeIncluded.add(v);
    }
    while (!mustBeIncluded.isEmpty()) {
        Integer verti = mustBeIncluded.remove();
        fecho.add(verti);
        aux[verti] = INCLUDED;
        Collection<Integer> neighbors = graph.getNeighbors(verti);
        for (int vertn : neighbors) {
            if (vertn != verti) {
                int previousValue = aux[vertn];
                aux[vertn] = aux[vertn] + NEIGHBOOR_COUNT_INCLUDED;
                if (previousValue < INCLUDED && aux[vertn] >= INCLUDED) {
                    //                        includeVertex(graph, fecho, aux, verti);
                    mustBeIncluded.add(vertn);
                }
            }
        }
    }

    //        for (int i : currentSet) {
    //            includeVertex(graph, fecho, aux, i);
    //        }
    return fecho.size() == graph.getVertexCount();
}

From source file:com.google.gwt.emultest.java.util.ArrayDequeTest.java

public void testOfferFirst() {
    Object o1 = new Object();
    Object o2 = new Object();
    Object o3 = new Object();

    ArrayDeque<Object> deque = new ArrayDeque<>();
    assertTrue(deque.offerFirst(o1));
    checkDequeSizeAndContent(deque, o1);
    assertTrue(deque.offerFirst(o2));
    checkDequeSizeAndContent(deque, o2, o1);
    assertTrue(deque.offerFirst(o3));
    checkDequeSizeAndContent(deque, o3, o2, o1);

    try {
        deque.offerFirst(null);
        fail();
    } catch (NullPointerException expected) {
    }
}

From source file:sadl.models.pdrta.PDRTA.java

public void toDOTLang(Appendable ap, double minP, boolean withInput, StateColoring sc) {

    // Write transitions with high probability
    final StringBuilder sb = new StringBuilder();
    final Queue<PDRTAState> q = new ArrayDeque<>();
    final Set<PDRTAState> found = new HashSet<>();
    q.add(root);
    found.add(root);
    while (!q.isEmpty()) {
        final PDRTAState s = q.remove();
        for (int i = 0; i < input.getAlphSize(); i++) {
            final Set<Entry<Integer, Interval>> ins = s.getIntervals(i).entrySet();
            for (final Entry<Integer, Interval> eIn : ins) {
                final Interval in = eIn.getValue();
                final double p = s.getStat().getTransProb(i, in);
                final PDRTAState t = in.getTarget();
                if (t != null && p >= minP) {
                    if (!found.contains(t)) {
                        q.add(t);
                        found.add(t);
                    }
                    // Write transition
                    sb.append(s.getIndex());
                    sb.append(" -> ");
                    sb.append(t.getIndex());
                    sb.append(" [ label = \"");
                    sb.append(getSymbol(i));
                    sb.append(" [");
                    sb.append(in.getBegin());
                    sb.append(", ");
                    sb.append(in.getEnd());
                    sb.append("] p=");
                    sb.append(p);
                    if (withInput) {
                        sb.append(" n=");
                        sb.append(in.getTails().size());
                    }
                    sb.append("\" ];\n");
                }
            }
        }
    }

    try {
        writeStatData(ap, found);

        // Write automaton in DOT language
        ap.append("digraph PDRTA {\n");
        ap.append("rankdir=LR;\n");
        ap.append("node[style = filled, fillcolor = white, shape = circle];\n");
        ap.append("\"\"[style = invis, shape = none, margin = 0, width = 0, heigth = 0];\n");
        ap.append("\"\" -> 0;\n");

        // Write states
        for (final PDRTAState s : states.valueCollection()) {
            if (found.contains(s)) {
                ap.append(Integer.toString(s.getIndex()));
                ap.append(" [ xlabel = \"");
                ap.append(Double.toString(s.getStat().getTailEndProb()));
                ap.append("\"");
                if (sc != null) {
                    if (sc.isRed(s)) {
                        ap.append(", fillcolor = \"#FFA9A9\"");
                    } else if (sc.isBlue(s)) {
                        ap.append(", fillcolor = \"#A9D1FF\"");
                    }
                }
                ap.append(" ];\n");
            }
        }

        // Add transitions
        ap.append(sb.toString());

        ap.append("}");
    } catch (final IOException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }
}

From source file:org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer.java

/**
 * Store the current region loads.
 */
private synchronized void updateRegionLoad() {
    // We create a new hashmap so that regions that are no longer there are removed.
    // However we temporarily need the old loads so we can use them to keep the rolling average.
    Map<String, Deque<RegionLoad>> oldLoads = loads;
    loads = new HashMap<String, Deque<RegionLoad>>();

    for (ServerName sn : clusterStatus.getServers()) {
        ServerLoad sl = clusterStatus.getLoad(sn);
        if (sl == null) {
            continue;
        }
        for (Entry<byte[], RegionLoad> entry : sl.getRegionsLoad().entrySet()) {
            Deque<RegionLoad> rLoads = oldLoads.get(Bytes.toString(entry.getKey()));
            if (rLoads == null) {
                // There was nothing there
                rLoads = new ArrayDeque<RegionLoad>();
            } else if (rLoads.size() >= 15) {
                rLoads.remove();
            }
            rLoads.add(entry.getValue());
            loads.put(Bytes.toString(entry.getKey()), rLoads);

        }
    }

    for (CostFromRegionLoadFunction cost : regionLoadFunctions) {
        cost.setLoads(loads);
    }
}

From source file:com.streamsets.datacollector.definition.ConfigDefinitionExtractor.java

void resolveDependencies(String configPrefix, List<ConfigDefinition> defs, Object contextMsg) {
    Map<String, ConfigDefinition> definitionsMap = new HashMap<>();
    Map<String, Map<String, Set<Object>>> dependencyMap = new HashMap<>();
    Map<String, Boolean> isFullyProcessed = new HashMap<>();
    for (ConfigDefinition def : defs) {
        definitionsMap.put(def.getName(), def);
        dependencyMap.put(def.getName(), new HashMap<String, Set<Object>>());
        isFullyProcessed.put(def.getName(), false);
    }

    cycles.clear();

    for (ConfigDefinition def : defs) {
        String dependsOnKey = def.getDependsOn();
        if (!StringUtils.isEmpty(dependsOnKey)) {
            verifyDependencyExists(definitionsMap, def, dependsOnKey, contextMsg);
            ConfigDefinition dependsOnDef = definitionsMap.get(dependsOnKey);
            // evaluate dependsOn triggers
            ConfigDef annotation = def.getConfigField().getAnnotation(ConfigDef.class);
            Set<Object> triggers = new HashSet<>();
            for (String trigger : annotation.triggeredByValue()) {
                triggers.add(ConfigValueExtractor.get().extract(dependsOnDef.getConfigField(),
                        dependsOnDef.getType(), trigger, contextMsg, true));
            }
            dependencyMap.get(def.getName()).put(dependsOnDef.getName(), triggers);
        }
        // Add direct dependencies to dependencyMap
        if (!def.getDependsOnMap().isEmpty()) {
            // Copy same as above.
            for (Map.Entry<String, List<Object>> dependsOn : def.getDependsOnMap().entrySet()) {
                dependsOnKey = dependsOn.getKey();
                if (!StringUtils.isEmpty(dependsOnKey)) {
                    verifyDependencyExists(definitionsMap, def, dependsOnKey, contextMsg);
                    Set<Object> triggers = new HashSet<>();
                    ConfigDefinition dependsOnDef = definitionsMap.get(dependsOnKey);
                    for (Object trigger : dependsOn.getValue()) {
                        triggers.add(ConfigValueExtractor.get().extract(dependsOnDef.getConfigField(),
                                dependsOnDef.getType(), (String) trigger, contextMsg, true));
                    }
                    Map<String, Set<Object>> dependencies = dependencyMap.get(def.getName());
                    if (dependencies.containsKey(dependsOnKey)) {
                        dependencies.get(dependsOnKey).addAll(triggers);
                    } else {
                        dependencies.put(dependsOnKey, triggers);
                    }
                }
            }
        }
    }

    for (ConfigDefinition def : defs) {

        if (isFullyProcessed.get(def.getName())) {
            continue;
        }
        // Now find all indirect dependencies
        Deque<StackNode> stack = new ArrayDeque<>();
        stack.push(new StackNode(def, new LinkedHashSet<String>()));
        while (!stack.isEmpty()) {
            StackNode current = stack.peek();
            // We processed this one's dependencies before, don't bother adding its children
            // The dependencies of this one have all been processed
            if (current.childrenAddedToStack) {
                stack.pop();
                Map<String, Set<Object>> currentDependencies = dependencyMap.get(current.def.getName());
                Set<String> children = new HashSet<>(current.def.getDependsOnMap().keySet());
                for (String child : children) {
                    if (StringUtils.isEmpty(child)) {
                        continue;
                    }
                    Map<String, Set<Object>> depsOfChild = dependencyMap.get(child);
                    for (Map.Entry<String, Set<Object>> depOfChild : depsOfChild.entrySet()) {
                        if (currentDependencies.containsKey(depOfChild.getKey())) {
                            // Add only the common trigger values,
                            // since it has to be one of those for both these to be triggered.
                            Set<Object> currentTriggers = currentDependencies.get(depOfChild.getKey());
                            Set<Object> childTriggers = depOfChild.getValue();
                            currentDependencies.put(depOfChild.getKey(),
                                    Sets.intersection(currentTriggers, childTriggers));
                        } else {
                            currentDependencies.put(depOfChild.getKey(), new HashSet<>(depOfChild.getValue()));
                        }
                    }
                }
                isFullyProcessed.put(current.def.getName(), true);
            } else {
                Set<String> children = current.def.getDependsOnMap().keySet();
                String dependsOn = current.def.getDependsOn();
                LinkedHashSet<String> dependencyAncestors = new LinkedHashSet<>(current.ancestors);
                dependencyAncestors.add(current.def.getName());
                if (!StringUtils.isEmpty(dependsOn) && !isFullyProcessed.get(current.def.getDependsOn())
                        && !detectCycle(dependencyAncestors, cycles, dependsOn)) {
                    stack.push(
                            new StackNode(definitionsMap.get(current.def.getDependsOn()), dependencyAncestors));
                }
                for (String child : children) {
                    if (!StringUtils.isEmpty(child) && !isFullyProcessed.get(child)
                            && !detectCycle(dependencyAncestors, cycles, child)) {
                        stack.push(new StackNode(definitionsMap.get(child), dependencyAncestors));
                    }
                }
                current.childrenAddedToStack = true;
            }
        }
    }
    Preconditions.checkState(cycles.isEmpty(),
            "The following cycles were detected in the configuration dependencies:\n"
                    + Joiner.on("\n").join(cycles));
    for (Map.Entry<String, Map<String, Set<Object>>> entry : dependencyMap.entrySet()) {
        Map<String, List<Object>> dependencies = new HashMap<>();
        definitionsMap.get(entry.getKey()).setDependsOnMap(dependencies);
        for (Map.Entry<String, Set<Object>> trigger : entry.getValue().entrySet()) {
            List<Object> triggerValues = new ArrayList<>();
            triggerValues.addAll(trigger.getValue());
            dependencies.put(trigger.getKey(), triggerValues);
        }
        definitionsMap.get(entry.getKey()).setDependsOn("");
    }
}

From source file:org.apache.giraph.comm.flow_control.CreditBasedFlowControl.java

@Override
public void sendRequest(int destTaskId, WritableRequest request) {
    Pair<AdjustableSemaphore, Integer> pair = perWorkerOpenRequestMap.get(destTaskId);
    // Check if this is the first time sending a request to a worker. If so, we
    // should add the worker id to the necessary bookkeeping data structures.
    if (pair == null) {
        pair = new MutablePair<>(new AdjustableSemaphore(maxOpenRequestsPerWorker), -1);
        Pair<AdjustableSemaphore, Integer> temp = perWorkerOpenRequestMap.putIfAbsent(destTaskId, pair);
        perWorkerUnsentRequestMap.putIfAbsent(destTaskId, new ArrayDeque<WritableRequest>());
        resumeRequestsId.putIfAbsent(destTaskId, Sets.<Long>newConcurrentHashSet());
        if (temp != null) {
            pair = temp;
        }
    }
    AdjustableSemaphore openRequestPermit = pair.getLeft();
    // Try to reserve a spot for the request amongst the open requests of
    // the destination worker.
    boolean shouldSend = openRequestPermit.tryAcquire();
    boolean shouldCache = false;
    while (!shouldSend) {
        // We should not send the request, and should cache the request instead.
        // It may be possible that the unsent message cache is also full, so we
        // should try to acquire a space on the cache, and if there is no extra
        // space in unsent request cache, we should wait until some space
        // become available. However, it is possible that during the time we are
        // waiting on the unsent messages cache, actual buffer for open requests
        // frees up space.
        try {
            shouldCache = unsentRequestPermit.tryAcquire(unsentWaitMsecs, TimeUnit.MILLISECONDS);
        } catch (InterruptedException e) {
            throw new IllegalStateException(
                    "shouldSend: failed " + "while waiting on the unsent request cache to have some more "
                            + "room for extra unsent requests!");
        }
        if (shouldCache) {
            break;
        }
        // We may have an open spot in the meantime that we were waiting on the
        // unsent requests.
        shouldSend = openRequestPermit.tryAcquire();
        if (shouldSend) {
            break;
        }
        // The current thread will be at this point only if it could not make
        // space amongst open requests for the destination worker and has been
        // timed-out in trying to acquire a space amongst unsent messages. So,
        // we should report logs, report progress, and check for request
        // failures.
        nettyClient.logAndSanityCheck();
    }
    // Either shouldSend == true or shouldCache == true
    if (shouldCache) {
        Deque<WritableRequest> unsentRequests = perWorkerUnsentRequestMap.get(destTaskId);
        // This synchronize block is necessary for the following reason:
        // Once we are at this point, it means there was no room for this
        // request to become an open request, hence we have to put it into
        // unsent cache. Consider the case that since last time we checked if
        // there is any room for an additional open request so far, all open
        // requests are delivered and their acknowledgements are also processed.
        // Now, if we put this request in the unsent cache, it is not being
        // considered to become an open request, as the only one who checks
        // on this matter would be the one who receives an acknowledgment for an
        // open request for the destination worker. So, a lock is necessary
        // to forcefully serialize the execution if this scenario is about to
        // happen.
        synchronized (unsentRequests) {
            shouldSend = openRequestPermit.tryAcquire();
            if (!shouldSend) {
                aggregateUnsentRequests.getAndIncrement();
                unsentRequests.add(request);
                return;
            }
        }
        // We found a spot amongst open requests to send this request. So, this
        // request won't be cached anymore.
        unsentRequestPermit.release();
    }
    nettyClient.doSend(destTaskId, request);
}