Example usage for java.util Deque remove

Introduction

On this page you can find example usages of java.util Deque.remove(), collected from open source projects.

Prototype

E remove();

Document

Retrieves and removes the head of the queue represented by this deque (in other words, the first element of this deque). Unlike poll(), this method throws a NoSuchElementException if the deque is empty.
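
For illustration, here is a minimal, self-contained sketch of that contract: remove() takes the head (first) element and throws once the deque is empty, where poll() would return null instead.

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.NoSuchElementException;

public class DequeRemoveDemo {
    public static void main(String[] args) {
        Deque<String> deque = new ArrayDeque<>();
        deque.add("first");
        deque.add("second");

        // remove() retrieves and removes the head (the first element)
        System.out.println(deque.remove()); // prints "first"
        System.out.println(deque.remove()); // prints "second"

        // Unlike poll(), remove() throws on an empty deque
        try {
            deque.remove();
        } catch (NoSuchElementException e) {
            System.out.println("deque is empty");
        }
    }
}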

Usage

From source file: org.apache.hadoop.hbase.index.mapreduce.IndexLoadIncrementalHFile.java

/**
 * @return A Multimap<startkey, LoadQueueItem> that groups LQI by likely bulk load region targets.
 */
private Multimap<ByteBuffer, LoadQueueItem> groupOrSplitPhase(final HTable table, ExecutorService pool,
        Deque<LoadQueueItem> queue, final Pair<byte[][], byte[][]> startEndKeys) throws IOException {
    // <region start key, LQI> need synchronized only within this scope of this
    // phase because of the puts that happen in futures.
    Multimap<ByteBuffer, LoadQueueItem> rgs = HashMultimap.create();
    final Multimap<ByteBuffer, LoadQueueItem> regionGroups = Multimaps.synchronizedMultimap(rgs);

    // drain LQIs and figure out bulk load groups
    Set<Future<List<LoadQueueItem>>> splittingFutures = new HashSet<Future<List<LoadQueueItem>>>();
    while (!queue.isEmpty()) {
        final LoadQueueItem item = queue.remove();

        final Callable<List<LoadQueueItem>> call = new Callable<List<LoadQueueItem>>() {
            public List<LoadQueueItem> call() throws Exception {
                List<LoadQueueItem> splits = groupOrSplit(regionGroups, item, table, startEndKeys);
                return splits;
            }
        };
        splittingFutures.add(pool.submit(call));
    }
    // get all the results. All grouping and splitting must finish before
    // we can attempt the atomic loads.
    for (Future<List<LoadQueueItem>> lqis : splittingFutures) {
        try {
            List<LoadQueueItem> splits = lqis.get();
            if (splits != null) {
                queue.addAll(splits);
            }
        } catch (ExecutionException e1) {
            Throwable t = e1.getCause();
            if (t instanceof IOException) {
                LOG.error("IOException during splitting", e1);
                throw (IOException) t; // would have been thrown if not parallelized,
            }
            LOG.error("Unexpected execution exception during splitting", e1);
            throw new IllegalStateException(t);
        } catch (InterruptedException e1) {
            LOG.error("Unexpected interrupted exception during splitting", e1);
            throw new IllegalStateException(e1);
        }
    }
    return regionGroups;
}
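
The method above drains the deque with remove(), submits one task per item, and pushes any returned splits back onto the queue; the next two examples repeat the same pattern. Here is a minimal, self-contained sketch of that drain-and-resubmit loop, using a hypothetical process() task in place of groupOrSplit():

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class DrainAndResubmit {

    // Hypothetical stand-in for groupOrSplit(): returns items to re-queue, or an empty list
    static List<String> process(String item) {
        return item.startsWith("split:") ? List.of() : List.of("split:" + item);
    }

    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        Deque<String> queue = new ArrayDeque<>(List.of("a", "b"));

        while (!queue.isEmpty()) {
            List<Future<List<String>>> futures = new ArrayList<>();
            // drain the deque, submitting one task per item
            while (!queue.isEmpty()) {
                final String item = queue.remove();
                futures.add(pool.submit(() -> process(item)));
            }
            // all tasks must finish before the next round; returned items go back on the queue
            for (Future<List<String>> future : futures) {
                queue.addAll(future.get());
            }
        }
        pool.shutdown();
    }
}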

From source file: org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles.java

/**
 * @return A Multimap<startkey, LoadQueueItem> that groups LQI by likely
 * bulk load region targets.
 */
private Multimap<ByteBuffer, LoadQueueItem> groupOrSplitPhase(final HTable table, ExecutorService pool,
        Deque<LoadQueueItem> queue, final Pair<byte[][], byte[][]> startEndKeys) throws IOException {
    // <region start key, LQI> need synchronized only within this scope of this
    // phase because of the puts that happen in futures.
    Multimap<ByteBuffer, LoadQueueItem> rgs = HashMultimap.create();
    final Multimap<ByteBuffer, LoadQueueItem> regionGroups = Multimaps.synchronizedMultimap(rgs);

    // drain LQIs and figure out bulk load groups
    Set<Future<List<LoadQueueItem>>> splittingFutures = new HashSet<Future<List<LoadQueueItem>>>();
    while (!queue.isEmpty()) {
        final LoadQueueItem item = queue.remove();

        final Callable<List<LoadQueueItem>> call = new Callable<List<LoadQueueItem>>() {
            public List<LoadQueueItem> call() throws Exception {
                List<LoadQueueItem> splits = groupOrSplit(regionGroups, item, table, startEndKeys);
                return splits;
            }
        };
        splittingFutures.add(pool.submit(call));
    }
    // get all the results.  All grouping and splitting must finish before
    // we can attempt the atomic loads.
    for (Future<List<LoadQueueItem>> lqis : splittingFutures) {
        try {
            List<LoadQueueItem> splits = lqis.get();
            if (splits != null) {
                queue.addAll(splits);
            }
        } catch (ExecutionException e1) {
            Throwable t = e1.getCause();
            if (t instanceof IOException) {
                LOG.error("IOException during splitting", e1);
                throw (IOException) t; // would have been thrown if not parallelized,
            }
            LOG.error("Unexpected execution exception during splitting", e1);
            throw new IllegalStateException(t);
        } catch (InterruptedException e1) {
            LOG.error("Unexpected interrupted exception during splitting", e1);
            throw (InterruptedIOException) new InterruptedIOException().initCause(e1);
        }
    }
    return regionGroups;
}

From source file: org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer.java

/**
 * Store the current region loads.
 */
private synchronized void updateRegionLoad() {
    // We create a new hashmap so that regions that are no longer there are removed.
    // However we temporarily need the old loads so we can use them to keep the rolling average.
    Map<String, Deque<RegionLoad>> oldLoads = loads;
    loads = new HashMap<String, Deque<RegionLoad>>();

    for (ServerName sn : clusterStatus.getServers()) {
        ServerLoad sl = clusterStatus.getLoad(sn);
        if (sl == null) {
            continue;
        }
        for (Entry<byte[], RegionLoad> entry : sl.getRegionsLoad().entrySet()) {
            Deque<RegionLoad> rLoads = oldLoads.get(Bytes.toString(entry.getKey()));
            if (rLoads == null) {
                // There was nothing there
                rLoads = new ArrayDeque<RegionLoad>();
            } else if (rLoads.size() >= 15) {
                rLoads.remove();
            }
            rLoads.add(entry.getValue());
            loads.put(Bytes.toString(entry.getKey()), rLoads);

        }
    }

    for (CostFromRegionLoadFunction cost : regionLoadFunctions) {
        cost.setLoads(loads);
    }
}
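
The balancer keeps a bounded rolling window per region: once a deque holds 15 samples, remove() evicts the oldest entry before the newest is appended. The same idiom in isolation, as a minimal sketch:

import java.util.ArrayDeque;
import java.util.Deque;

public class RollingWindow {
    private static final int MAX_SAMPLES = 15; // window size, mirroring the example above

    private final Deque<Double> samples = new ArrayDeque<>();

    // Append a sample, evicting the oldest one once the window is full
    void add(double value) {
        if (samples.size() >= MAX_SAMPLES) {
            samples.remove(); // drop the oldest (head) entry
        }
        samples.add(value);   // append at the tail
    }

    double average() {
        return samples.stream().mapToDouble(Double::doubleValue).average().orElse(0.0);
    }
}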

From source file: org.apache.hadoop.hbase.tool.LoadIncrementalHFiles.java

/**
 * @param table the table to load into
 * @param pool the ExecutorService
 * @param queue the queue for LoadQueueItem
 * @param startEndKeys start and end keys
 * @return A map that groups LQI by likely bulk load region targets and Set of missing hfiles.
 */
private Pair<Multimap<ByteBuffer, LoadQueueItem>, Set<String>> groupOrSplitPhase(final Table table,
        ExecutorService pool, Deque<LoadQueueItem> queue, final Pair<byte[][], byte[][]> startEndKeys)
        throws IOException {
    // <region start key, LQI> need synchronized only within this scope of this
    // phase because of the puts that happen in futures.
    Multimap<ByteBuffer, LoadQueueItem> rgs = HashMultimap.create();
    final Multimap<ByteBuffer, LoadQueueItem> regionGroups = Multimaps.synchronizedMultimap(rgs);
    Set<String> missingHFiles = new HashSet<>();
    Pair<Multimap<ByteBuffer, LoadQueueItem>, Set<String>> pair = new Pair<>(regionGroups, missingHFiles);

    // drain LQIs and figure out bulk load groups
    Set<Future<Pair<List<LoadQueueItem>, String>>> splittingFutures = new HashSet<>();
    while (!queue.isEmpty()) {
        final LoadQueueItem item = queue.remove();

        final Callable<Pair<List<LoadQueueItem>, String>> call = new Callable<Pair<List<LoadQueueItem>, String>>() {
            @Override
            public Pair<List<LoadQueueItem>, String> call() throws Exception {
                Pair<List<LoadQueueItem>, String> splits = groupOrSplit(regionGroups, item, table,
                        startEndKeys);
                return splits;
            }
        };
        splittingFutures.add(pool.submit(call));
    }
    // get all the results. All grouping and splitting must finish before
    // we can attempt the atomic loads.
    for (Future<Pair<List<LoadQueueItem>, String>> lqis : splittingFutures) {
        try {
            Pair<List<LoadQueueItem>, String> splits = lqis.get();
            if (splits != null) {
                if (splits.getFirst() != null) {
                    queue.addAll(splits.getFirst());
                } else {
                    missingHFiles.add(splits.getSecond());
                }
            }
        } catch (ExecutionException e1) {
            Throwable t = e1.getCause();
            if (t instanceof IOException) {
                LOG.error("IOException during splitting", e1);
                throw (IOException) t; // would have been thrown if not parallelized,
            }
            LOG.error("Unexpected execution exception during splitting", e1);
            throw new IllegalStateException(t);
        } catch (InterruptedException e1) {
            LOG.error("Unexpected interrupted exception during splitting", e1);
            throw (InterruptedIOException) new InterruptedIOException().initCause(e1);
        }
    }
    return pair;
}

From source file: org.structnetalign.merge.DistanceClusterer.java

/**
 *
 * @param graph
 * @param roots
 * @return For each root, the set of other roots that are easily reachable, including the parent root
 */
public final Map<V, Set<V>> transform(Graph<V, E> graph, Collection<V> roots) {

    Map<V, Set<V>> reachableMap = new HashMap<V, Set<V>>();

    for (V root : roots) {

        Set<V> reachable = new HashSet<>();
        HashSet<V> unvisited = new HashSet<V>(graph.getVertices());

        // a map from every vertex to the edge used to get to it
        // works because only visit a vertex once
        HashMap<V, E> edgesTaken = new HashMap<V, E>();

        Deque<V> queue = new LinkedList<V>();
        queue.add(root);
        reachable.add(root);

        while (!queue.isEmpty()) {

            V vertex = queue.remove();
            E edge = edgesTaken.get(vertex);
            unvisited.remove(vertex);

            // stop traversing if we're too far
            if (!isWithinRange(root, vertex))
                continue;

            reachable.add(vertex); // note that this is AFTER the within-range check

            if (edge != null)
                visit(vertex, edge); // this is the ONLY place where we "officially" VISIT a vertex

            Collection<V> neighbors = graph.getNeighbors(vertex);
            for (V neighbor : neighbors) {
                if (unvisited.contains(neighbor)) {
                    queue.add(neighbor);
                    E edgeToNeighbor = graph.findEdge(vertex, neighbor);
                    edgesTaken.put(neighbor, edgeToNeighbor);
                }
            }

            unvisit(vertex, edge); // this is the ONLY place where we "officially" UNVISIT a vertex

        }

        reachableMap.put(root, reachable);

    }

    return reachableMap;
}
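
In this example the Deque serves as a plain FIFO queue for breadth-first traversal: remove() dequeues the next vertex from the head while add() enqueues unvisited neighbors at the tail. A stripped-down sketch of the same shape, over a hypothetical adjacency-map graph:

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class BfsSketch {
    // Returns all vertices reachable from root in a graph given as an adjacency map
    static <V> Set<V> reachable(Map<V, List<V>> adjacency, V root) {
        Set<V> visited = new HashSet<>();
        Deque<V> queue = new ArrayDeque<>();
        queue.add(root);
        visited.add(root);
        while (!queue.isEmpty()) {
            V vertex = queue.remove(); // dequeue from the head (FIFO)
            for (V neighbor : adjacency.getOrDefault(vertex, List.of())) {
                if (visited.add(neighbor)) { // add() returns false if already seen
                    queue.add(neighbor);     // enqueue at the tail
                }
            }
        }
        return visited;
    }
}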

From source file: org.sybila.parasim.computation.verification.stl.cpu.AbstractUnaryTemporalMonitor.java

private List<Robustness> precomputeRobustness(Monitor subMonitor, FormulaInterval interval) {
    Deque<Robustness> lemireDeque = new LemireDeque<>(createComparator());
    List<Robustness> precomputed = new ArrayList<>();
    Iterator<Robustness> window = subMonitor.iterator();
    Iterator<Robustness> current = subMonitor.iterator();
    int currentIndex = 0;
    float currentTime = current.next().getTime();
    while (window.hasNext()) {
        Robustness memory = null;
        boolean windowEndReached = false;
        // push new points
        while (window.hasNext() && !windowEndReached) {
            memory = window.next();
            // check whether the time upper bound is reached
            if (memory.getTime() < currentTime + interval.getUpperBound()) {
                lemireDeque.offer(memory);
                memory = null;
            } else if (memory.getTime() == currentTime + interval.getUpperBound()) {
                lemireDeque.offer(memory);
                memory = null;
                windowEndReached = true;
            } else {
                windowEndReached = true;
            }
        }
        // check whether the window end has been reached
        if (!windowEndReached) {
            return precomputed;
        }
        // remove useless points
        while (!lemireDeque.isEmpty()
                && lemireDeque.peekFirst().getTime() < currentTime + interval.getLowerBound()) {
            lemireDeque.remove();
        }
        // get the first robustness in deque
        Robustness found = lemireDeque.peekFirst();
        precomputed.add(new SimpleRobustness(found.getValue(), currentTime, getProperty()));
        currentIndex++;
        currentTime = current.next().getTime();
        if (memory != null) {
            lemireDeque.offer(memory);
            memory = null;
        }
    }
    return precomputed;
}
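
The LemireDeque above follows Daniel Lemire's streaming-minimum technique: candidates that can never become a window minimum are discarded from the tail, and remove() evicts head entries that have slid out of the window. As a simplification (count-based rather than time-based windows), here is a minimal sketch of that monotonic-deque idea:

import java.util.ArrayDeque;
import java.util.Deque;

public class SlidingWindowMin {
    // Minimum of each window of size k; assumes 1 <= k <= values.length
    static int[] windowMins(int[] values, int k) {
        int[] result = new int[values.length - k + 1];
        Deque<Integer> candidates = new ArrayDeque<>(); // indices whose values are increasing
        for (int i = 0; i < values.length; i++) {
            // drop tail candidates that can no longer be a minimum
            while (!candidates.isEmpty() && values[candidates.peekLast()] >= values[i]) {
                candidates.removeLast();
            }
            candidates.add(i);
            // evict the head once it has slid out of the current window
            if (candidates.peekFirst() <= i - k) {
                candidates.remove();
            }
            if (i >= k - 1) {
                result[i - k + 1] = values[candidates.peekFirst()];
            }
        }
        return result;
    }
}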