Example usage for java.util.Queue.offer

List of usage examples for java.util.Queue.offer

Introduction

On this page you can find example usages of java.util.Queue.offer.

Prototype

boolean offer(E e);

Document

Inserts the specified element into this queue if it is possible to do so immediately without violating capacity restrictions, returning true upon success and false if no space is currently available.
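
The return value is what distinguishes offer from add on a capacity-constrained queue: offer reports failure with false, while add throws an exception. A minimal sketch, using java.util.concurrent.ArrayBlockingQueue as one bounded Queue implementation:

import java.util.Queue;
import java.util.concurrent.ArrayBlockingQueue;

public class OfferVsAdd {
    public static void main(String[] args) {
        // Bounded queue with room for two elements
        Queue<String> queue = new ArrayBlockingQueue<>(2);

        System.out.println(queue.offer("a")); // true
        System.out.println(queue.offer("b")); // true
        System.out.println(queue.offer("c")); // false: queue is full, no exception

        try {
            queue.add("c"); // add() throws when no space is available
        } catch (IllegalStateException e) {
            System.out.println("add() threw: " + e.getMessage());
        }
    }
}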

Usage

From source file:bwem.Graph.java

private int[] computeDistances(final ChokePoint start, final List<ChokePoint> targets) {
    final int[] distances = new int[targets.size()];

    TileImpl.getStaticMarkable().unmarkAll();

    final Queue<Pair<Integer, ChokePoint>> toVisit = new PriorityQueue<>(Comparator.comparingInt(a -> a.first));
    toVisit.offer(new Pair<>(0, start));

    int remainingTargets = targets.size();
    while (!toVisit.isEmpty()) {
        final Pair<Integer, ChokePoint> distanceAndChokePoint = toVisit.poll();
        final int currentDist = distanceAndChokePoint.first;
        final ChokePoint current = distanceAndChokePoint.second;
        final Tile currentTile = getMap().getData().getTile(current.getCenter().toTilePosition(),
                CheckMode.NO_CHECK);
        //            bwem_assert(currentTile.InternalData() == currentDist);
        if (!(((TileImpl) currentTile).getInternalData() == currentDist)) {
            throw new IllegalStateException();
        }
        ((TileImpl) currentTile).setInternalData(0); // resets Tile::m_internalData for future usage
        ((TileImpl) currentTile).getMarkable().setMarked();

        for (int i = 0; i < targets.size(); ++i) {
            if (current == targets.get(i)) {
                distances[i] = currentDist;
                --remainingTargets;
            }
        }
        if (remainingTargets == 0) {
            break;
        }

        if (current.isBlocked() && (!current.equals(start))) {
            continue;
        }

        for (final Area pArea : new Area[] { current.getAreas().getFirst(), current.getAreas().getSecond() }) {
            for (final ChokePoint next : pArea.getChokePoints()) {
                if (!next.equals(current)) {
                    final int newNextDist = currentDist + distance(current, next);
                    final Tile nextTile = getMap().getData().getTile(next.getCenter().toTilePosition(),
                            CheckMode.NO_CHECK);
                    if (!((TileImpl) nextTile).getMarkable().isMarked()) {
                        if (((TileImpl) nextTile).getInternalData() != 0) { // next already in toVisit
                            if (newNextDist < ((TileImpl) nextTile).getInternalData()) { // nextNewDist < nextOldDist
                                // To update next's distance, we need to remove and re-insert it from toVisit:
                                // bwem_assert(iNext != range.second);
                                final boolean removed = toVisit
                                        .remove(new Pair<>(((TileImpl) nextTile).getInternalData(), next));
                                if (!removed) {
                                    throw new IllegalStateException();
                                }
                                ((TileImpl) nextTile).setInternalData(newNextDist);
                                ((ChokePointImpl) next).setPathBackTrace(current);
                                toVisit.offer(new Pair<>(newNextDist, next));
                            }
                        } else {
                            ((TileImpl) nextTile).setInternalData(newNextDist);
                            ((ChokePointImpl) next).setPathBackTrace(current);
                            toVisit.offer(new Pair<>(newNextDist, next));
                        }
                    }
                }
            }
        }
    }

    //    //   bwem_assert(!remainingTargets);
    //        if (!(remainingTargets == 0)) {
    //            throw new IllegalStateException();
    //        }

    // reset Tile::m_internalData for future usage
    for (Pair<Integer, ChokePoint> distanceToChokePoint : toVisit) {
        ((TileImpl) getMap().getData().getTile(distanceToChokePoint.second.getCenter().toTilePosition(),
                CheckMode.NO_CHECK)).setInternalData(0);
    }

    return distances;
}
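
Note the remove-then-offer step above: java.util.PriorityQueue has no decrease-key operation, so the code reprioritizes an entry by removing the stale pair and offering a new one with the updated distance. A self-contained sketch of that pattern, with an illustrative Entry record standing in for the Pair used above:

import java.util.Comparator;
import java.util.PriorityQueue;
import java.util.Queue;

public class Reprioritize {
    // Illustrative key/priority pair; record equality compares both fields
    record Entry(String key, int priority) { }

    public static void main(String[] args) {
        Queue<Entry> toVisit = new PriorityQueue<>(Comparator.comparingInt(Entry::priority));
        toVisit.offer(new Entry("a", 10));
        toVisit.offer(new Entry("b", 5));

        // PriorityQueue has no decrease-key, so to lower a's priority we
        // remove the entry under its old priority and offer a replacement.
        boolean removed = toVisit.remove(new Entry("a", 10));
        if (removed) {
            toVisit.offer(new Entry("a", 1));
        }

        while (!toVisit.isEmpty()) {
            System.out.println(toVisit.poll()); // Entry[key=a, priority=1], then Entry[key=b, priority=5]
        }
    }
}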

From source file:com.mgmtp.jfunk.common.util.Configuration.java

/**
 * If properties are present that start with {@link JFunkConstants#SYSTEM_PROPERTIES}, the
 * corresponding values are taken as property files and loaded here.
 */
private void loadExtraFiles(final String filterPrefix, final boolean preserveExisting) {
    Map<String, String> view = Maps.filterKeys(this, Predicates.startsWith(filterPrefix));
    while (true) {
        if (view.isEmpty()) {
            break;
        }

        Queue<String> fileKeys = Queues.newArrayDeque(view.values());

        // we need to keep them separately in order to be able to reload them (see put method)
        extraFileProperties.addAll(fileKeys);

        // Remove original keys in order to prevent a stack overflow
        view.clear();

        for (String fileNameKey = null; (fileNameKey = fileKeys.poll()) != null;) {
            // Recursion
            String fileName = processPropertyValue(fileNameKey);
            if (PLACEHOLDER_PATTERN.matcher(fileName).find()) {
                // not all placeholders were resolved, so we enqueue it again to process another file first
                fileKeys.offer(fileName);
            } else {
                load(fileName, preserveExisting);
            }
        }
    }
}
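
The loop above drains the queue with poll and offers back any file name whose placeholders could not be resolved yet, so it is retried after the remaining entries. A stripped-down sketch of that drain-and-requeue idiom; the item names and the startsWith check are placeholders, and a simple counter guards against spinning forever on an item that never becomes processable:

import java.util.ArrayDeque;
import java.util.List;
import java.util.Queue;

public class DrainAndRequeue {
    public static void main(String[] args) {
        Queue<String> work = new ArrayDeque<>(List.of("ready-1", "deferred-2", "ready-3"));

        int deferred = 0;
        // Poll until drained; items not ready yet are offered back to the tail,
        // giving up once consecutive deferrals exceed the number of items still queued.
        for (String item; (item = work.poll()) != null;) {
            if (item.startsWith("ready")) {
                System.out.println("processed " + item);
                deferred = 0;
            } else if (++deferred <= work.size()) {
                work.offer(item); // try again after the remaining items
            } else {
                System.out.println("giving up on " + item);
            }
        }
    }
}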

From source file:org.apache.drill.exec.store.mongo.MongoGroupScan.java

@Override
public void applyAssignments(List<DrillbitEndpoint> endpoints) throws PhysicalOperatorSetupException {
    logger.debug("Incoming endpoints :" + endpoints);
    watch.reset();
    watch.start();

    final int numSlots = endpoints.size();
    int totalAssignmentsTobeDone = chunksMapping.size();

    Preconditions.checkArgument(numSlots <= totalAssignmentsTobeDone, String.format(
            "Incoming endpoints %d is greater than number of chunks %d", numSlots, totalAssignmentsTobeDone));

    final int minPerEndpointSlot = (int) Math.floor((double) totalAssignmentsTobeDone / numSlots);
    final int maxPerEndpointSlot = (int) Math.ceil((double) totalAssignmentsTobeDone / numSlots);

    endpointFragmentMapping = Maps.newHashMapWithExpectedSize(numSlots);
    Map<String, Queue<Integer>> endpointHostIndexListMap = Maps.newHashMap();

    for (int i = 0; i < numSlots; ++i) {
        endpointFragmentMapping.put(i, new ArrayList<MongoSubScanSpec>(maxPerEndpointSlot));
        String hostname = endpoints.get(i).getAddress();
        Queue<Integer> hostIndexQueue = endpointHostIndexListMap.get(hostname);
        if (hostIndexQueue == null) {
            hostIndexQueue = Lists.newLinkedList();
            endpointHostIndexListMap.put(hostname, hostIndexQueue);
        }
        hostIndexQueue.add(i);
    }

    Set<Entry<String, List<ChunkInfo>>> chunksToAssignSet = Sets.newHashSet(chunksInverseMapping.entrySet());

    for (Iterator<Entry<String, List<ChunkInfo>>> chunksIterator = chunksToAssignSet.iterator(); chunksIterator
            .hasNext();) {
        Entry<String, List<ChunkInfo>> chunkEntry = chunksIterator.next();
        Queue<Integer> slots = endpointHostIndexListMap.get(chunkEntry.getKey());
        if (slots != null) {
            for (ChunkInfo chunkInfo : chunkEntry.getValue()) {
                Integer slotIndex = slots.poll();
                List<MongoSubScanSpec> subScanSpecList = endpointFragmentMapping.get(slotIndex);
                subScanSpecList.add(buildSubScanSpecAndGet(chunkInfo));
                slots.offer(slotIndex);
            }
            chunksIterator.remove();
        }
    }

    PriorityQueue<List<MongoSubScanSpec>> minHeap = new PriorityQueue<List<MongoSubScanSpec>>(numSlots,
            LIST_SIZE_COMPARATOR);
    PriorityQueue<List<MongoSubScanSpec>> maxHeap = new PriorityQueue<List<MongoSubScanSpec>>(numSlots,
            LIST_SIZE_COMPARATOR_REV);
    for (List<MongoSubScanSpec> listOfScan : endpointFragmentMapping.values()) {
        if (listOfScan.size() < minPerEndpointSlot) {
            minHeap.offer(listOfScan);
        } else if (listOfScan.size() > minPerEndpointSlot) {
            maxHeap.offer(listOfScan);
        }
    }

    if (chunksToAssignSet.size() > 0) {
        for (Entry<String, List<ChunkInfo>> chunkEntry : chunksToAssignSet) {
            for (ChunkInfo chunkInfo : chunkEntry.getValue()) {
                List<MongoSubScanSpec> smallestList = minHeap.poll();
                smallestList.add(buildSubScanSpecAndGet(chunkInfo));
                minHeap.offer(smallestList);
            }
        }
    }

    while (minHeap.peek() != null && minHeap.peek().size() < minPerEndpointSlot) {
        List<MongoSubScanSpec> smallestList = minHeap.poll();
        List<MongoSubScanSpec> largestList = maxHeap.poll();
        smallestList.add(largestList.remove(largestList.size() - 1));
        if (largestList.size() > minPerEndpointSlot) {
            maxHeap.offer(largestList);
        }
        if (smallestList.size() < minPerEndpointSlot) {
            minHeap.offer(smallestList);
        }
    }

    logger.debug("Built assignment map in {} s.\nEndpoints: {}.\nAssignment Map: {}",
            watch.elapsed(TimeUnit.NANOSECONDS) / 1000, endpoints, endpointFragmentMapping.toString());
}
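
offer and poll also drive the rebalancing step above, which moves work from a max-heap of over-full assignment lists to a min-heap of under-full ones. A reduced sketch of the same idea using plain mutable counters in place of the MongoSubScanSpec lists (the numbers are made up for illustration):

import java.util.Comparator;
import java.util.List;
import java.util.PriorityQueue;

public class Rebalance {
    public static void main(String[] args) {
        // Mutable per-endpoint work counts (illustrative values)
        List<int[]> loads = List.of(new int[] { 1 }, new int[] { 4 }, new int[] { 7 });
        int min = 3; // minimum work each endpoint should carry

        PriorityQueue<int[]> minHeap = new PriorityQueue<>(Comparator.comparingInt((int[] a) -> a[0]));
        PriorityQueue<int[]> maxHeap = new PriorityQueue<>(Comparator.comparingInt((int[] a) -> a[0]).reversed());
        for (int[] load : loads) {
            if (load[0] < min) minHeap.offer(load);
            else if (load[0] > min) maxHeap.offer(load);
        }

        // Shift one unit at a time from the most loaded to the least loaded endpoint
        // (this sketch assumes the max-heap still has surplus, as in the method above)
        while (minHeap.peek() != null && minHeap.peek()[0] < min) {
            int[] smallest = minHeap.poll();
            int[] largest = maxHeap.poll();
            smallest[0]++;
            largest[0]--;
            if (largest[0] > min) maxHeap.offer(largest);
            if (smallest[0] < min) minHeap.offer(smallest);
        }

        loads.forEach(l -> System.out.println(l[0])); // prints 3, 4 and 5; no endpoint is below the minimum
    }
}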

From source file:org.voltdb.iv2.Cartographer.java

private boolean doPartitionsHaveReplicas(int hid) {
    hostLog.debug("Cartographer: Reloading partition information.");
    List<String> partitionDirs = null;
    try {
        partitionDirs = m_zk.getChildren(VoltZK.leaders_initiators, null);
    } catch (KeeperException | InterruptedException e) {
        return false;
    }

    // Don't fetch the values serially; do it asynchronously
    Queue<ZKUtil.ByteArrayCallback> dataCallbacks = new ArrayDeque<>();
    Queue<ZKUtil.ChildrenCallback> childrenCallbacks = new ArrayDeque<>();
    for (String partitionDir : partitionDirs) {
        String dir = ZKUtil.joinZKPath(VoltZK.leaders_initiators, partitionDir);
        try {
            ZKUtil.ByteArrayCallback callback = new ZKUtil.ByteArrayCallback();
            m_zk.getData(dir, false, callback, null);
            dataCallbacks.offer(callback);
            ZKUtil.ChildrenCallback childrenCallback = new ZKUtil.ChildrenCallback();
            m_zk.getChildren(dir, false, childrenCallback, null);
            childrenCallbacks.offer(childrenCallback);
        } catch (Exception e) {
            return false;
        }
    }
    //Assume that we are ksafe
    for (String partitionDir : partitionDirs) {
        int pid = LeaderElector.getPartitionFromElectionDir(partitionDir);
        try {
            // Don't let anyone die if someone is in INITIALIZING state
            byte[] partitionState = dataCallbacks.poll().getData();
            if (partitionState != null && partitionState.length == 1) {
                if (partitionState[0] == LeaderElector.INITIALIZING) {
                    return false;
                }
            }

            List<String> replicas = childrenCallbacks.poll().getChildren();
            //This is here just so callback is polled.
            if (pid == MpInitiator.MP_INIT_PID) {
                continue;
            }
            //Get Hosts for replicas
            final List<Integer> replicaHost = new ArrayList<>();
            boolean hostHasReplicas = false;
            for (String replica : replicas) {
                final String split[] = replica.split("/");
                final long hsId = Long.valueOf(split[split.length - 1].split("_")[0]);
                final int hostId = CoreUtils.getHostIdFromHSId(hsId);
                if (hostId == hid) {
                    hostHasReplicas = true;
                }
                replicaHost.add(hostId);
            }
            hostLog.debug("Replica Host for Partition " + pid + " " + replicaHost);
            if (hostHasReplicas && replicaHost.size() <= 1) {
                return false;
            }
        } catch (InterruptedException | KeeperException | NumberFormatException e) {
            return false;
        }
    }
    return true;
}
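
Here offer and poll keep the asynchronous callbacks in the same order as the partition directories they were issued for, so the second loop can pair each result with its directory. A generic sketch of that fan-out, collect-in-order pattern, using CompletableFuture in place of the ZooKeeper callbacks (which are VoltDB-specific classes):

import java.util.ArrayDeque;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.CompletableFuture;

public class AsyncFanOut {
    public static void main(String[] args) {
        List<String> dirs = List.of("partition-0", "partition-1", "partition-2");

        // Issue all requests first; the queue preserves submission order
        Queue<CompletableFuture<String>> pending = new ArrayDeque<>();
        for (String dir : dirs) {
            pending.offer(CompletableFuture.supplyAsync(() -> "data for " + dir));
        }

        // Collect results in the same order the requests were offered
        for (String dir : dirs) {
            String result = pending.poll().join();
            System.out.println(dir + " -> " + result);
        }
    }
}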

From source file:com.taobao.metamorphosis.gregor.slave.OrderedThreadPoolExecutor.java

/**
 * {@inheritDoc}
 */
@Override
public void execute(final Runnable task) {
    if (this.shutdown) {
        this.rejectTask(task);
    }

    // Check that it's an IoEvent task
    this.checkTaskType(task);

    final IoEvent event = (IoEvent) task;

    // Get the associated session
    final IoCatalog ioCatalog = event.getIoCatalog();

    // Get the session's queue of events
    final TasksQueue connectionTasksQueue = this.getConnectionTasksQueue(ioCatalog);
    final Queue<Runnable> tasksQueue = connectionTasksQueue.tasksQueue;

    // Whether to offer a new connection
    boolean offerConnection;

    // Ok, the message has been accepted
    synchronized (tasksQueue) {
        // Inject the event into the executor taskQueue
        tasksQueue.offer(event);

        if (connectionTasksQueue.processingCompleted) {
            connectionTasksQueue.processingCompleted = false;
            offerConnection = true;
        } else {
            offerConnection = false;
        }
    }

    if (offerConnection) {
        // As the tasksQueue was empty, the task has been executed
        // immediately, so we can move the session to the queue
        // of sessions waiting for completion.
        this.waitingIoCatalogs.offer(ioCatalog);
    }

    this.addWorkerIfNecessary();
}
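
The pattern above offers the event into a per-connection queue under that queue's lock, and offers the connection itself to a shared waiting queue only when no worker already owns it. A condensed, illustrative sketch of that double-offer hand-off; the class and field names are placeholders rather than the Metamorphosis types:

import java.util.ArrayDeque;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

public class PerKeyQueues {
    static final class KeyedQueue {
        final Queue<Runnable> tasks = new ArrayDeque<>();
        boolean idle = true; // true when no worker currently owns this queue
    }

    private final Queue<KeyedQueue> waiting = new ConcurrentLinkedQueue<>();

    void submit(KeyedQueue q, Runnable task) {
        boolean offerKey;
        synchronized (q.tasks) {
            q.tasks.offer(task); // enqueue the task for this key
            offerKey = q.idle;   // only hand the key to a worker if nobody has it
            q.idle = false;
        }
        if (offerKey) {
            waiting.offer(q);    // a worker thread (not shown) would poll this and drain q.tasks
        }
    }

    public static void main(String[] args) {
        PerKeyQueues pool = new PerKeyQueues();
        KeyedQueue q = new KeyedQueue();
        pool.submit(q, () -> System.out.println("task 1"));
        pool.submit(q, () -> System.out.println("task 2")); // same key: not re-offered to waiting
        System.out.println("keys waiting for a worker: " + pool.waiting.size()); // 1
    }
}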

From source file:edu.uci.ics.jung.algorithms.scoring.BetweennessCentrality.java

protected void computeBetweenness(Queue<V> queue, Transformer<E, ? extends Number> edge_weights) {
    for (V v : graph.getVertices()) {
        // initialize the betweenness data for this new vertex
        for (V s : graph.getVertices())
            this.vertex_data.put(s, new BetweennessData());

        //         if (v.equals(new Integer(0)))
        //            System.out.println("pause");

        vertex_data.get(v).numSPs = 1;
        vertex_data.get(v).distance = 0;

        Stack<V> stack = new Stack<V>();
        //            Buffer<V> queue = new UnboundedFifoBuffer<V>();
        //            queue.add(v);
        queue.offer(v);

        while (!queue.isEmpty()) {
            //                V w = queue.remove();
            V w = queue.poll();
            stack.push(w);
            BetweennessData w_data = vertex_data.get(w);

            for (E e : graph.getOutEdges(w)) {
                // TODO (jrtom): change this to getOtherVertices(w, e)
                V x = graph.getOpposite(w, e);
                if (x.equals(w))
                    continue;
                double wx_weight = edge_weights.transform(e).doubleValue();

                //                for(V x : graph.getSuccessors(w)) 
                //                {
                //                   if (x.equals(w))
                //                      continue;

                // FIXME: the other problem is that I need to 
                // keep putting the neighbors of things we've just 
                // discovered in the queue, if they're undiscovered or
                // at greater distance.

                // FIXME: this is the problem, right here, I think: 
                // need to update position in queue if distance changes
                // (which can only happen with weighted edges).
                // for each outgoing edge e from w, get other end x
                // if x not already visited (dist x < 0)
                //   set x's distance to w's dist + edge weight
                //   add x to queue; pri in queue is x's dist
                // if w's dist + edge weight < x's dist 
                //   update x's dist
                //   update x in queue (MapBinaryHeap)
                //   clear x's incoming edge list
                // if w's dist + edge weight = x's dist
                //   add e to x's incoming edge list

                BetweennessData x_data = vertex_data.get(x);
                double x_potential_dist = w_data.distance + wx_weight;

                if (x_data.distance < 0) {
                    //                        queue.add(x);
                    //                        vertex_data.get(x).distance = vertex_data.get(w).distance + 1;
                    x_data.distance = x_potential_dist;
                    queue.offer(x);
                }

                // note:
                // (1) this can only happen with weighted edges
                // (2) x's SP count and incoming edges are updated below 
                if (x_data.distance > x_potential_dist) {
                    x_data.distance = x_potential_dist;
                    // invalidate previously identified incoming edges
                    // (we have a new shortest path distance to x)
                    x_data.incomingEdges.clear();
                    // update x's position in queue
                    ((MapBinaryHeap<V>) queue).update(x);
                }
                //                  if (vertex_data.get(x).distance == vertex_data.get(w).distance + 1) 
                // 
                //                    if (x_data.distance == x_potential_dist) 
                //                    {
                //                        x_data.numSPs += w_data.numSPs;
                ////                        vertex_data.get(x).predecessors.add(w);
                //                        x_data.incomingEdges.add(e);
                //                    }
            }
            for (E e : graph.getOutEdges(w)) {
                V x = graph.getOpposite(w, e);
                if (x.equals(w))
                    continue;
                double e_weight = edge_weights.transform(e).doubleValue();
                BetweennessData x_data = vertex_data.get(x);
                double x_potential_dist = w_data.distance + e_weight;
                if (x_data.distance == x_potential_dist) {
                    x_data.numSPs += w_data.numSPs;
                    //                        vertex_data.get(x).predecessors.add(w);
                    x_data.incomingEdges.add(e);
                }
            }
        }
        while (!stack.isEmpty()) {
            V x = stack.pop();

            //              for (V w : vertex_data.get(x).predecessors) 
            for (E e : vertex_data.get(x).incomingEdges) {
                V w = graph.getOpposite(x, e);
                double partialDependency = vertex_data.get(w).numSPs / vertex_data.get(x).numSPs
                        * (1.0 + vertex_data.get(x).dependency);
                vertex_data.get(w).dependency += partialDependency;
                //                  E w_x = graph.findEdge(w, x);
                //                  double w_x_score = edge_scores.get(w_x).doubleValue();
                //                  w_x_score += partialDependency;
                //                  edge_scores.put(w_x, w_x_score);
                double e_score = edge_scores.get(e).doubleValue();
                edge_scores.put(e, e_score + partialDependency);
            }
            if (!x.equals(v)) {
                double x_score = vertex_scores.get(x).doubleValue();
                x_score += vertex_data.get(x).dependency;
                vertex_scores.put(x, x_score);
            }
        }
    }

    if (graph instanceof UndirectedGraph) {
        for (V v : graph.getVertices()) {
            double v_score = vertex_scores.get(v).doubleValue();
            v_score /= 2.0;
            vertex_scores.put(v, v_score);
        }
        for (E e : graph.getEdges()) {
            double e_score = edge_scores.get(e).doubleValue();
            e_score /= 2.0;
            edge_scores.put(e, e_score);
        }
    }

    vertex_data.clear();
}

From source file:net.sf.eventgraphj.centrality.EgoNetworkBetweennessCentrality.java

protected void computeBetweenness(Queue<V> queue, Transformer<E, ? extends Number> edge_weights) {
    for (V v : graph.getVertices()) {
        // initialize the betweenness data for this new vertex
        for (V s : graph.getVertices())
            this.vertex_data.put(s, new BetweennessData());

        //         if (v.equals(new Integer(0)))
        //            System.out.println("pause");

        vertex_data.get(v).numSPs = 1;
        vertex_data.get(v).distance = 0;

        Stack<V> stack = new Stack<V>();
        //            Buffer<V> queue = new UnboundedFifoBuffer<V>();
        //            queue.add(v);
        queue.offer(v);

        while (!queue.isEmpty()) {
            //                V w = queue.remove();
            V w = queue.poll();
            stack.push(w);
            BetweennessData w_data = vertex_data.get(w);
            for (E e : graph.getOutEdges(w)) {
                // TODO (jrtom): change this to getOtherVertices(w, e)
                V x = graph.getOpposite(w, e);
                if (x.equals(w))
                    continue;
                double wx_weight = edge_weights.transform(e).doubleValue();

                //                for(V x : graph.getSuccessors(w)) 
                //                {
                //                   if (x.equals(w))
                //                      continue;

                // FIXME: the other problem is that I need to 
                // keep putting the neighbors of things we've just 
                // discovered in the queue, if they're undiscovered or
                // at greater distance.

                // FIXME: this is the problem, right here, I think: 
                // need to update position in queue if distance changes
                // (which can only happen with weighted edges).
                // for each outgoing edge e from w, get other end x
                // if x not already visited (dist x < 0)
                //   set x's distance to w's dist + edge weight
                //   add x to queue; pri in queue is x's dist
                // if w's dist + edge weight < x's dist 
                //   update x's dist
                //   update x in queue (MapBinaryHeap)
                //   clear x's incoming edge list
                // if w's dist + edge weight = x's dist
                //   add e to x's incoming edge list

                BetweennessData x_data = vertex_data.get(x);
                double x_potential_dist = w_data.distance + wx_weight;
                if (x_potential_dist > this.egoNetworkSize)
                    continue;

                if (x_data.distance < 0) {
                    //                        queue.add(x);
                    //                        vertex_data.get(x).distance = vertex_data.get(w).distance + 1;
                    x_data.distance = x_potential_dist;
                    queue.offer(x);
                }

                // note:
                // (1) this can only happen with weighted edges
                // (2) x's SP count and incoming edges are updated below 
                if (x_data.distance > x_potential_dist) {
                    x_data.distance = x_potential_dist;
                    // invalidate previously identified incoming edges
                    // (we have a new shortest path distance to x)
                    x_data.incomingEdges.clear();
                    // update x's position in queue
                    ((MapBinaryHeap<V>) queue).update(x);
                }
                //                  if (vertex_data.get(x).distance == vertex_data.get(w).distance + 1) 
                // 
                //                    if (x_data.distance == x_potential_dist) 
                //                    {
                //                        x_data.numSPs += w_data.numSPs;
                ////                        vertex_data.get(x).predecessors.add(w);
                //                        x_data.incomingEdges.add(e);
                //                    }
            }
            for (E e : graph.getOutEdges(w)) {
                V x = graph.getOpposite(w, e);
                if (x.equals(w))
                    continue;
                double e_weight = edge_weights.transform(e).doubleValue();
                BetweennessData x_data = vertex_data.get(x);
                double x_potential_dist = w_data.distance + e_weight;
                if (x_data.distance == x_potential_dist) {
                    x_data.numSPs += w_data.numSPs;
                    //                        vertex_data.get(x).predecessors.add(w);
                    x_data.incomingEdges.add(e);
                }
            }
        }
        while (!stack.isEmpty()) {
            V x = stack.pop();

            //              for (V w : vertex_data.get(x).predecessors) 
            for (E e : vertex_data.get(x).incomingEdges) {
                V w = graph.getOpposite(x, e);
                double partialDependency = vertex_data.get(w).numSPs / vertex_data.get(x).numSPs
                        * (1.0 + vertex_data.get(x).dependency);
                vertex_data.get(w).dependency += partialDependency;
                //                  E w_x = graph.findEdge(w, x);
                //                  double w_x_score = edge_scores.get(w_x).doubleValue();
                //                  w_x_score += partialDependency;
                //                  edge_scores.put(w_x, w_x_score);
                double e_score = edge_scores.get(e).doubleValue();
                edge_scores.put(e, e_score + partialDependency);
            }
            if (!x.equals(v)) {
                double x_score = vertex_scores.get(x).doubleValue();
                x_score += vertex_data.get(x).dependency;
                vertex_scores.put(x, x_score);
            }
        }
    }

    if (graph instanceof UndirectedGraph) {
        for (V v : graph.getVertices()) {
            double v_score = vertex_scores.get(v).doubleValue();
            v_score /= 2.0;
            vertex_scores.put(v, v_score);
        }
        for (E e : graph.getEdges()) {
            double e_score = edge_scores.get(e).doubleValue();
            e_score /= 2.0;
            edge_scores.put(e, e_score);
        }
    }

    vertex_data.clear();
}

From source file:org.apache.hadoop.hive.ql.io.CombineHiveInputFormat.java

/**
 * Create Hive splits based on CombineFileSplit.
 */
private InputSplit[] getCombineSplits(JobConf job, int numSplits,
        Map<String, PartitionDesc> pathToPartitionInfo) throws IOException {
    init(job);
    Map<String, ArrayList<String>> pathToAliases = mrwork.getPathToAliases();
    Map<String, Operator<? extends OperatorDesc>> aliasToWork = mrwork.getAliasToWork();
    CombineFileInputFormatShim combine = ShimLoader.getHadoopShims().getCombineFileInputFormat();

    InputSplit[] splits = null;
    if (combine == null) {
        splits = super.getSplits(job, numSplits);
        return splits;
    }

    if (combine.getInputPathsShim(job).length == 0) {
        throw new IOException("No input paths specified in job");
    }
    ArrayList<InputSplit> result = new ArrayList<InputSplit>();

    // combine splits only from same tables and same partitions. Do not combine splits from multiple
    // tables or multiple partitions.
    Path[] paths = combine.getInputPathsShim(job);

    List<Path> inpDirs = new ArrayList<Path>();
    List<Path> inpFiles = new ArrayList<Path>();
    Map<CombinePathInputFormat, CombineFilter> poolMap = new HashMap<CombinePathInputFormat, CombineFilter>();
    Set<Path> poolSet = new HashSet<Path>();

    for (Path path : paths) {
        PartitionDesc part = HiveFileFormatUtils.getPartitionDescFromPathRecursively(pathToPartitionInfo, path,
                IOPrepareCache.get().allocatePartitionDescMap());
        TableDesc tableDesc = part.getTableDesc();
        if ((tableDesc != null) && tableDesc.isNonNative()) {
            return super.getSplits(job, numSplits);
        }

        // Use HiveInputFormat if any of the paths is not splittable
        Class inputFormatClass = part.getInputFileFormatClass();
        String inputFormatClassName = inputFormatClass.getName();
        InputFormat inputFormat = getInputFormatFromCache(inputFormatClass, job);
        String deserializerClassName = null;
        try {
            deserializerClassName = part.getDeserializer(job).getClass().getName();
        } catch (Exception e) {
            // ignore
        }
        FileSystem inpFs = path.getFileSystem(job);

        // Since there is no easy way of knowing whether MAPREDUCE-1597 is present in the tree or not,
        // we use a configuration variable for the same
        if (this.mrwork != null && !this.mrwork.getHadoopSupportsSplittable()) {
            // The following code should be removed, once
            // https://issues.apache.org/jira/browse/MAPREDUCE-1597 is fixed.
            // Hadoop does not handle non-splittable files correctly for CombineFileInputFormat,
            // so don't use CombineFileInputFormat for non-splittable files

            // i.e., don't combine if the input format is a TextInputFormat and has compression turned on

            if (inputFormat instanceof TextInputFormat) {
                Queue<Path> dirs = new LinkedList<Path>();
                FileStatus fStats = inpFs.getFileStatus(path);

                // If path is a directory
                if (fStats.isDir()) {
                    dirs.offer(path);
                } else if ((new CompressionCodecFactory(job)).getCodec(path) != null) {
                    // if compression codec is set, use HiveInputFormat.getSplits (don't combine)
                    splits = super.getSplits(job, numSplits);
                    return splits;
                }

                while (dirs.peek() != null) {
                    Path tstPath = dirs.remove();
                    FileStatus[] fStatus = inpFs.listStatus(tstPath, FileUtils.HIDDEN_FILES_PATH_FILTER);
                    for (int idx = 0; idx < fStatus.length; idx++) {
                        if (fStatus[idx].isDir()) {
                            dirs.offer(fStatus[idx].getPath());
                        } else if ((new CompressionCodecFactory(job))
                                .getCodec(fStatus[idx].getPath()) != null) {
                            // if compression codec is set, use HiveInputFormat.getSplits (don't combine)
                            splits = super.getSplits(job, numSplits);
                            return splits;
                        }
                    }
                }
            }
        }
        //don't combine if inputformat is a SymlinkTextInputFormat
        if (inputFormat instanceof SymlinkTextInputFormat) {
            splits = super.getSplits(job, numSplits);
            return splits;
        }

        Path filterPath = path;

        // Does a pool exist for this path already
        CombineFilter f = null;
        List<Operator<? extends OperatorDesc>> opList = null;

        if (!mrwork.isMapperCannotSpanPartns()) {
            //if mapper can span partitions, make sure a splits does not contain multiple
            // opList + inputFormatClassName + deserializerClassName combination
            // This is done using the Map of CombinePathInputFormat to PathFilter

            opList = HiveFileFormatUtils.doGetWorksFromPath(pathToAliases, aliasToWork, filterPath);
            CombinePathInputFormat combinePathInputFormat = new CombinePathInputFormat(opList,
                    inputFormatClassName, deserializerClassName);
            f = poolMap.get(combinePathInputFormat);
            if (f == null) {
                f = new CombineFilter(filterPath);
                LOG.info("CombineHiveInputSplit creating pool for " + path + "; using filter path "
                        + filterPath);
                combine.createPool(job, f);
                poolMap.put(combinePathInputFormat, f);
            } else {
                LOG.info("CombineHiveInputSplit: pool is already created for " + path + "; using filter path "
                        + filterPath);
                f.addPath(filterPath);
            }
        } else {
            // In the case of tablesample, the input paths are pointing to files rather than directories.
            // We need to get the parent directory as the filtering path so that all files in the same
            // parent directory will be grouped into one pool but not files from different parent
            // directories. This guarantees that a split will combine all files in the same partition
            // but won't cross multiple partitions if the user has asked so.
            if (!path.getFileSystem(job).getFileStatus(path).isDir()) { // path is not directory
                filterPath = path.getParent();
                inpFiles.add(path);
                poolSet.add(filterPath);
            } else {
                inpDirs.add(path);
            }
        }
    }

    // Processing directories
    List<CombineFileSplit> iss = new ArrayList<CombineFileSplit>();
    if (!mrwork.isMapperCannotSpanPartns()) {
        //mapper can span partitions
        //combine into as few as one split, subject to the PathFilters set
        // using combine.createPool.
        iss = Arrays.asList(combine.getSplits(job, 1));
    } else {
        for (Path path : inpDirs) {
            processPaths(job, combine, iss, path);
        }

        if (inpFiles.size() > 0) {
            // Processing files
            for (Path filterPath : poolSet) {
                combine.createPool(job, new CombineFilter(filterPath));
            }
            processPaths(job, combine, iss, inpFiles.toArray(new Path[0]));
        }
    }

    if (mrwork.getNameToSplitSample() != null && !mrwork.getNameToSplitSample().isEmpty()) {
        iss = sampleSplits(iss);
    }

    for (CombineFileSplit is : iss) {
        CombineHiveInputSplit csplit = new CombineHiveInputSplit(job, is, pathToPartitionInfo);
        result.add(csplit);
    }

    LOG.info("number of splits " + result.size());
    return result.toArray(new CombineHiveInputSplit[result.size()]);
}
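
The inner loop above is a breadth-first directory walk driven entirely by Queue methods: offer enqueues directories, peek tests for remaining work, and remove takes the next one. The same traversal expressed over java.nio.file as a self-contained sketch (the starting path is just an example):

import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.LinkedList;
import java.util.Queue;

public class BreadthFirstWalk {
    public static void main(String[] args) throws IOException {
        Queue<Path> dirs = new LinkedList<>();
        dirs.offer(Path.of(args.length > 0 ? args[0] : ".")); // starting directory

        while (dirs.peek() != null) {
            Path dir = dirs.remove();
            try (DirectoryStream<Path> entries = Files.newDirectoryStream(dir)) {
                for (Path entry : entries) {
                    if (Files.isDirectory(entry)) {
                        dirs.offer(entry); // descend later, breadth-first
                    } else {
                        System.out.println(entry);
                    }
                }
            }
        }
    }
}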

From source file:org.kuali.rice.krad.uif.service.impl.ViewHelperServiceImpl.java

/**
 * {@inheritDoc}
 */
@Override
public void applyDefaultValues(Component component) {
    if (component == null) {
        return;
    }

    View view = ViewLifecycle.getView();
    Object model = ViewLifecycle.getModel();

    @SuppressWarnings("unchecked")
    Queue<LifecycleElement> elementQueue = RecycleUtils.getInstance(LinkedList.class);
    elementQueue.offer(component);
    try {
        while (!elementQueue.isEmpty()) {
            LifecycleElement currentElement = elementQueue.poll();

            // if component is a data field apply default value
            if (currentElement instanceof DataField) {
                DataField dataField = ((DataField) currentElement);

                // need to make sure binding is initialized since this could be on a page we have not initialized yet
                dataField.getBindingInfo().setDefaults(view, dataField.getPropertyName());

                populateDefaultValueForField(model, dataField, dataField.getBindingInfo().getBindingPath());
            }

            elementQueue.addAll(ViewLifecycleUtils.getElementsForLifecycle(currentElement).values());
        }
    } finally {
        elementQueue.clear();
        RecycleUtils.recycle(elementQueue);
    }
}

From source file:org.rhq.cassandra.ClusterInitService.java

/**
 * This method attempts to establish a Thrift RPC connection to each host for the
 * number specified. In other words, if there are four hosts and <code>numHosts</code>
 * is 2, this method will block only until it can connect to two of the hosts. If the
 * connection fails, the host is retried after going through the other, remaining
 * hosts.
 * <br/><br/>
 * After connecting to all cluster nodes, this method will sleep for 10 seconds
 * before returning. This is to give the cluster a chance to create the system auth
 * schema and to create the cassandra super user. Cassandra has a hard-coded delay of
 * 10 seconds before it creates the super user, which means the rhq schema cannot be
 * created before that.
 * @param numHosts The number of hosts to which a successful connection has to be made
 *                 before returning.
 * @param delay The amount of time to wait between attempts to make a connection
 * @param retries The number of times to retry connecting. A runtime exception will be
 *                thrown when the number of failed connections exceeds this value.
 * @param initialWait The number of seconds to wait before the first try.
 */
public void waitForClusterToStart(String[] storageNodes, int jmxPorts[], int numHosts, long delay, int retries,
        int initialWait) {
    if (initialWait > 0) {
        if (log.isDebugEnabled()) {
            log.debug("Waiting before JMX calls to the storage nodes for " + initialWait + " seconds...");
        }
        sleep(initialWait * 1000);
    }

    int connections = 0;
    int failedConnections = 0;
    Queue<Integer> queue = new LinkedList<Integer>();
    for (int index = 0; index < storageNodes.length; index++) {
        queue.add(index);
    }

    Integer storageNodeIndex = queue.poll();

    while (storageNodeIndex != null) {
        if (failedConnections >= retries) {
            throw new RuntimeException("Unable to verify that cluster nodes have started after "
                    + failedConnections + " failed attempts");
        }
        try {
            boolean isNativeTransportRunning = isNativeTransportRunning(storageNodes[storageNodeIndex],
                    jmxPorts[storageNodeIndex]);
            if (log.isDebugEnabled() && isNativeTransportRunning) {
                log.debug("Successfully connected to cassandra node [" + storageNodes[storageNodeIndex] + "]");
            }
            if (isNativeTransportRunning) {
                ++connections;
            } else {
                queue.offer(storageNodeIndex);
            }
            if (connections == numHosts) {
                if (log.isDebugEnabled()) {
                    log.debug("Successdully connected to all nodes. Sleeping for 10 seconds to allow for the "
                            + "cassandra superuser set up to complete.");
                }
                sleep(10 * 1000);
                return;
            }
        } catch (Exception e) {
            ++failedConnections;
            queue.offer(storageNodeIndex);
            if (log.isDebugEnabled()) {
                log.debug("Unable to open JMX connection on port [" + jmxPorts[storageNodeIndex]
                        + "] to cassandra node [" + storageNodes[storageNodeIndex] + "].", e);
            } else if (log.isInfoEnabled()) {
                log.debug("Unable to open connection to cassandra node.");
            }
        }
        sleep(delay);
        storageNodeIndex = queue.poll();
    }
}
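
The retry loop above uses offer to push an unreachable node back to the tail of the queue, so the remaining nodes are tried before it is attempted again, while a failure counter bounds the total number of retries. A reduced sketch with a fake availability check standing in for the JMX call (checkNode and the node names are placeholders):

import java.util.LinkedList;
import java.util.Queue;

public class RetryQueue {
    // Placeholder for the real availability check (e.g. the JMX call above)
    static boolean checkNode(String node) {
        return !node.equals("node-2"); // pretend node-2 never comes up
    }

    public static void main(String[] args) {
        Queue<String> queue = new LinkedList<>();
        queue.offer("node-1");
        queue.offer("node-2");
        queue.offer("node-3");

        int failures = 0, maxRetries = 3;
        for (String node; (node = queue.poll()) != null;) {
            if (failures >= maxRetries) {
                throw new RuntimeException("gave up after " + failures + " failed attempts");
            }
            if (checkNode(node)) {
                System.out.println(node + " is up");
            } else {
                failures++;
                queue.offer(node); // push to the back and try the other nodes first
            }
        }
    }
}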