Example usage for java.util.Queue.remove()

List of usage examples for java.util.Queue.remove()

Introduction

On this page you can find example usages of java.util.Queue.remove().

Prototype

E remove();

Document

Retrieves and removes the head of this queue.
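
A minimal, self-contained sketch of the contract (not taken from the examples below): remove() returns the head of the queue in FIFO order and, unlike poll(), throws NoSuchElementException when the queue is empty.

import java.util.LinkedList;
import java.util.NoSuchElementException;
import java.util.Queue;

public class QueueRemoveDemo {
    public static void main(String[] args) {
        Queue<String> queue = new LinkedList<>();
        queue.add("first");
        queue.add("second");

        // remove() retrieves and removes the head of the queue
        System.out.println(queue.remove()); // prints "first"
        System.out.println(queue.remove()); // prints "second"

        // On an empty queue, remove() throws; poll() would return null instead
        try {
            queue.remove();
        } catch (NoSuchElementException e) {
            System.out.println("queue is now empty");
        }
    }
}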

Usage

From source file:org.apache.hadoop.hive.ql.QueryPlan.java

/**
 * generate the operator graph and operator list for the given task based on
 * the operators corresponding to that task.
 *
 * @param task
 *          api.Task which needs its operator graph populated
 * @param topOps
 *          the set of top operators from which the operator graph for the
 *          task is hanging
 */
private void populateOperatorGraph(org.apache.hadoop.hive.ql.plan.api.Task task,
        Collection<Operator<? extends OperatorDesc>> topOps) {

    task.setOperatorGraph(new org.apache.hadoop.hive.ql.plan.api.Graph());
    task.getOperatorGraph().setNodeType(NodeType.OPERATOR);

    Queue<Operator<? extends OperatorDesc>> opsToVisit = new LinkedList<Operator<? extends OperatorDesc>>();
    Set<Operator<? extends OperatorDesc>> opsVisited = new HashSet<Operator<? extends OperatorDesc>>();
    opsToVisit.addAll(topOps);
    while (opsToVisit.peek() != null) {
        Operator<? extends OperatorDesc> op = opsToVisit.remove();
        opsVisited.add(op);
        // populate the operator
        org.apache.hadoop.hive.ql.plan.api.Operator operator = new org.apache.hadoop.hive.ql.plan.api.Operator();
        operator.setOperatorId(op.getOperatorId());
        operator.setOperatorType(op.getType());
        task.addToOperatorList(operator);
        // done processing the operator
        if (op.getChildOperators() != null) {
            org.apache.hadoop.hive.ql.plan.api.Adjacency entry = new org.apache.hadoop.hive.ql.plan.api.Adjacency();
            entry.setAdjacencyType(AdjacencyType.CONJUNCTIVE);
            entry.setNode(op.getOperatorId());
            for (Operator<? extends OperatorDesc> childOp : op.getChildOperators()) {
                entry.addToChildren(childOp.getOperatorId());
                if (!opsVisited.contains(childOp)) {
                    opsToVisit.add(childOp);
                }
            }
            task.getOperatorGraph().addToAdjacencyList(entry);
        }
    }
}
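
The example above drains the queue with while (opsToVisit.peek() != null), which works because these queues never contain null elements; testing isEmpty() before each remove() is the more common idiom. Below is a distilled sketch of the same breadth-first pattern, using hypothetical String nodes and an adjacency map instead of Hive operators.

import java.util.*;

public class QueueDrainSketch {
    public static void main(String[] args) {
        // Hypothetical graph: node name -> children
        Map<String, List<String>> children = Map.of(
                "root", List.of("a", "b"),
                "a", List.of("c"),
                "b", List.of(),
                "c", List.of());

        Queue<String> toVisit = new LinkedList<>(List.of("root"));
        Set<String> visited = new HashSet<>();
        while (!toVisit.isEmpty()) {
            String current = toVisit.remove(); // safe: queue checked non-empty
            visited.add(current);
            for (String child : children.get(current)) {
                if (!visited.contains(child)) {
                    toVisit.add(child);
                }
            }
        }
        System.out.println(visited); // contains root, a, b, c
    }
}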

From source file:org.squashtest.tm.service.internal.testcase.CustomTestCaseModificationServiceImpl.java

@Override
// TODO : secure this
public TestCase addNewTestCaseVersion(long originalTcId, TestCase newVersionData) {

    List<Long> milestoneIds = new ArrayList<>();

    Optional<Milestone> activeMilestone = activeMilestoneHolder.getActiveMilestone();
    if (activeMilestone.isPresent()) {
        milestoneIds.add(activeMilestone.get().getId());
    }

    // copy the core attributes
    TestCase orig = testCaseDao.findById(originalTcId);
    TestCase newTC = orig.createCopy();

    newTC.setName(newVersionData.getName());
    newTC.setReference(newVersionData.getReference());
    newTC.setDescription(newVersionData.getDescription());
    newTC.clearMilestones();

    // now we must insert it at the correct location
    TestCaseLibrary library = libraryService.findLibraryOfRootNodeIfExist(orig);
    if (library != null) {
        libraryService.addTestCaseToLibrary(library.getId(), newTC, null);
    } else {
        TestCaseFolder folder = libraryService.findParentIfExists(orig);
        libraryService.addTestCaseToFolder(folder.getId(), newTC, null);
    }

    // copy custom fields
    customFieldValuesService.copyCustomFieldValuesContent(orig, newTC);
    Queue<ActionTestStep> origSteps = new LinkedList<>(orig.getActionSteps());
    Queue<ActionTestStep> newSteps = new LinkedList<>(newTC.getActionSteps());
    while (!origSteps.isEmpty()) {
        ActionTestStep oStep = origSteps.remove();
        ActionTestStep nStep = newSteps.remove();
        customFieldValuesService.copyCustomFieldValuesContent(oStep, nStep);
    }

    // manage the milestones
    milestoneService.bindTestCaseToMilestones(newTC.getId(), milestoneIds);
    milestoneService.unbindTestCaseFromMilestones(originalTcId, milestoneIds);

    return newTC;
}

From source file:it.geosolutions.geobatch.unredd.script.reprocess.ReprocessAction.java

/**
 * Main loop on input files. Single file processing is called on execute(File xmlFile)
 */
public Queue<FileSystemEvent> execute(Queue<FileSystemEvent> events) throws ActionException {

    if (getTempDir() == null) {
        throw new IllegalStateException("temp dir has not been initialized");
    }
    if (!getTempDir().exists()) {
        throw new IllegalStateException("temp dir does not exist");
    }

    geoStoreUtil = new GeoStoreUtil(conf.getGeoStoreConfig(), getTempDir());

    //        initComponents(properties);

    final Queue<FileSystemEvent> ret = new LinkedList<FileSystemEvent>();

    while (!events.isEmpty()) {
        final FileSystemEvent ev = events.remove();

        try {
            if (ev != null) {
                if (LOGGER.isTraceEnabled()) {
                    LOGGER.trace("Processing incoming event: " + ev.getSource());
                }
                File xmlFile = ev.getSource(); // this is the input xml file

                /**
                 * *************************
                 * The reprocessing flow will recompute statistics and charts. It is needed when data in the
                 * staging area are changed, i.e.: vector data are edited, chart scripts are modified or
                 * inserted, or new statistics are added. Each doXXX method handles one of these cases.
                 */
                ReprocessRequest request = RequestReader.load(xmlFile);
                if (request == null) {
                    throw new ActionException(this, "Could not parse input file:" + xmlFile.getName());
                }

                if (request instanceof ReprocessLayerRequest) {
                    reprocessLayer((ReprocessLayerRequest) request);

                } else if (request instanceof ReprocessChartRequest) {
                    reprocessChart((ReprocessChartRequest) request);

                } else if (request instanceof ReprocessStatsRequest) {
                    reprocessStats((ReprocessStatsRequest) request);

                }

                ret.add(new FileSystemEvent(xmlFile, FileSystemEventType.FILE_ADDED));

            } else {
                LOGGER.error("Encountered a null event: skipping event");
                continue;
            }

        } catch (ActionException ex) {
            LOGGER.error(ex.getMessage());
            listenerForwarder.failed(ex);
            throw ex;

        } catch (Exception ex) {
            LOGGER.error(ex.getMessage(), ex);
            listenerForwarder.failed(ex);
            throw new ActionException(this, ex.getMessage(), ex);
        }
    }

    return ret;
}

From source file:org.apache.hadoop.hive.ql.QueryPlan.java

/**
 * Extract all the counters from tasks and operators.
 */
private void extractCounters() throws IOException {
    Queue<Task<? extends Serializable>> tasksToVisit = new LinkedList<Task<? extends Serializable>>();
    Set<Task<? extends Serializable>> tasksVisited = new HashSet<Task<? extends Serializable>>();
    tasksToVisit.addAll(rootTasks);
    while (tasksToVisit.peek() != null) {
        Task<? extends Serializable> task = tasksToVisit.remove();
        tasksVisited.add(task);
        // add children to tasksToVisit
        if (task.getChildTasks() != null) {
            for (Task<? extends Serializable> childTask : task.getChildTasks()) {
                if (!tasksVisited.contains(childTask)) {
                    tasksToVisit.add(childTask);
                }
            }
        }
        if (task.getId() == null) {
            continue;
        }
        if (started.contains(task.getId()) && done.contains(task.getId())) {
            continue;
        }

        // get the counters for the task
        counters.put(task.getId(), task.getCounters());

        // check if task is started
        if (task.started()) {
            started.add(task.getId());
        }
        if (task.done()) {
            done.add(task.getId());
        }
        if (task instanceof ExecDriver) {
            ExecDriver mrTask = (ExecDriver) task;
            if (mrTask.mapStarted()) {
                started.add(task.getId() + "_MAP");
            }
            if (mrTask.mapDone()) {
                done.add(task.getId() + "_MAP");
            }
            if (mrTask.hasReduce()) {
                if (mrTask.reduceStarted()) {
                    started.add(task.getId() + "_REDUCE");
                }
                if (mrTask.reduceDone()) {
                    done.add(task.getId() + "_REDUCE");
                }
            }
        } else if (task instanceof ConditionalTask) {
            ConditionalTask cTask = (ConditionalTask) task;
            for (Task<? extends Serializable> listTask : cTask.getListTasks()) {
                if (!tasksVisited.contains(listTask)) {
                    tasksToVisit.add(listTask);
                }
            }
        }
    }
}

From source file:org.apache.hadoop.hive.ql.QueryPlan.java

/**
 * Populate api.QueryPlan from exec structures. This includes constructing the
 * dependency graphs of stages and operators.
 *
 * @throws IOException
 */
private void populateQueryPlan() throws IOException {
    query.setStageGraph(new org.apache.hadoop.hive.ql.plan.api.Graph());
    query.getStageGraph().setNodeType(NodeType.STAGE);

    Queue<Task<? extends Serializable>> tasksToVisit = new LinkedList<Task<? extends Serializable>>();
    Set<Task<? extends Serializable>> tasksVisited = new HashSet<Task<? extends Serializable>>();
    tasksToVisit.addAll(rootTasks);
    while (tasksToVisit.size() != 0) {
        Task<? extends Serializable> task = tasksToVisit.remove();
        tasksVisited.add(task);
        // populate stage
        org.apache.hadoop.hive.ql.plan.api.Stage stage = new org.apache.hadoop.hive.ql.plan.api.Stage();
        stage.setStageId(task.getId());
        stage.setStageType(task.getType());
        query.addToStageList(stage);

        if (task instanceof ExecDriver) {
            // populate map task
            ExecDriver mrTask = (ExecDriver) task;
            org.apache.hadoop.hive.ql.plan.api.Task mapTask = new org.apache.hadoop.hive.ql.plan.api.Task();
            mapTask.setTaskId(stage.getStageId() + "_MAP");
            mapTask.setTaskType(TaskType.MAP);
            stage.addToTaskList(mapTask);
            populateOperatorGraph(mapTask, mrTask.getWork().getMapWork().getAliasToWork().values());

            // populate reduce task
            if (mrTask.hasReduce()) {
                org.apache.hadoop.hive.ql.plan.api.Task reduceTask = new org.apache.hadoop.hive.ql.plan.api.Task();
                reduceTask.setTaskId(stage.getStageId() + "_REDUCE");
                reduceTask.setTaskType(TaskType.REDUCE);
                stage.addToTaskList(reduceTask);
                Collection<Operator<? extends OperatorDesc>> reducerTopOps = new ArrayList<Operator<? extends OperatorDesc>>();
                reducerTopOps.add(mrTask.getWork().getReduceWork().getReducer());
                populateOperatorGraph(reduceTask, reducerTopOps);
            }
        } else {
            org.apache.hadoop.hive.ql.plan.api.Task otherTask = new org.apache.hadoop.hive.ql.plan.api.Task();
            otherTask.setTaskId(stage.getStageId() + "_OTHER");
            otherTask.setTaskType(TaskType.OTHER);
            stage.addToTaskList(otherTask);
        }
        if (task instanceof ConditionalTask) {
            org.apache.hadoop.hive.ql.plan.api.Adjacency listEntry = new org.apache.hadoop.hive.ql.plan.api.Adjacency();
            listEntry.setAdjacencyType(AdjacencyType.DISJUNCTIVE);
            listEntry.setNode(task.getId());
            ConditionalTask t = (ConditionalTask) task;

            for (Task<? extends Serializable> listTask : t.getListTasks()) {
                if (t.getChildTasks() != null) {
                    org.apache.hadoop.hive.ql.plan.api.Adjacency childEntry = new org.apache.hadoop.hive.ql.plan.api.Adjacency();
                    childEntry.setAdjacencyType(AdjacencyType.DISJUNCTIVE);
                    childEntry.setNode(listTask.getId());
                    // done processing the task
                    for (Task<? extends Serializable> childTask : t.getChildTasks()) {
                        childEntry.addToChildren(childTask.getId());
                        if (!tasksVisited.contains(childTask)) {
                            tasksToVisit.add(childTask);
                        }
                    }
                    query.getStageGraph().addToAdjacencyList(childEntry);
                }

                listEntry.addToChildren(listTask.getId());
                if (!tasksVisited.contains(listTask)) {
                    tasksToVisit.add(listTask);
                }
            }
            query.getStageGraph().addToAdjacencyList(listEntry);
        } else if (task.getChildTasks() != null) {
            org.apache.hadoop.hive.ql.plan.api.Adjacency entry = new org.apache.hadoop.hive.ql.plan.api.Adjacency();
            entry.setAdjacencyType(AdjacencyType.CONJUNCTIVE);
            entry.setNode(task.getId());
            // done processing the task
            for (Task<? extends Serializable> childTask : task.getChildTasks()) {
                entry.addToChildren(childTask.getId());
                if (!tasksVisited.contains(childTask)) {
                    tasksToVisit.add(childTask);
                }
            }
            query.getStageGraph().addToAdjacencyList(entry);
        }
    }
}

From source file:gov.nih.nci.grididloader.BigIdCreator.java

/**
 * Create Big IDs for each entity and save them into the database.
 * Each entity is updated in parallel by several threads, but the entities
 * are processed in a serial fashion.
 */
public void createAndUpdate() throws Exception {

    if (hiFactory.getSystemType() == HandleInterfaceType.CLASSIC) {
        // Create site handle, if the database is empty.
        // This is necessary because otherwise 50 threads will try to create it
        // at once, resulting in duplicates and a subsequent avalanche of collisions
        final HandleRepositoryIDInterface idSvc = (HandleRepositoryIDInterface) hiFactory.getHandleInterface();
        // create dummy id (also creates site handle)
        ResourceIdInfo rid = new ResourceIdInfo(new URI("urn://ncicb"), "dummy");
        idSvc.createOrGetGlobalID(rid);
        // remove the id we created, the site handle will remain
        idSvc.removeGlobalID(rid);
    }

    Connection conn = null;
    FileWriter benchmarkFile = null;

    try {
        benchmarkFile = new FileWriter("timings.txt");
        conn = dataSource.getConnection();

        for (BigEntity entity : config.getEntities()) {

            final String className = entity.getClassName();
            if (!classFilter.isEmpty() && ((include && !classFilter.contains(className))
                    || (!include && classFilter.contains(className)))) {
                System.err.println("Filtered out " + className);
                continue;
            }

            long start = System.currentTimeMillis();

            final String table = entity.getTableName();
            final String id = entity.getPrimaryKey();

            Statement stmt = null;
            ResultSet rs = null;
            long numRows = 0;
            long minId = 0;
            long maxId = 0;

            try {
                // get number of rows and id space for the current entity
                stmt = conn.createStatement();
                rs = stmt.executeQuery(
                        "SELECT MIN(" + id + ") minId, MAX(" + id + ") maxId, COUNT(*) rowCount FROM " + table);
                rs.next();
                numRows = rs.getLong("rowCount");
                minId = rs.getLong("minId");
                maxId = rs.getLong("maxId");
            } catch (SQLException e) {
                System.err.println("Error processing " + table);
                e.printStackTrace();
                continue;
            } finally {
                try {
                    if (rs != null)
                        rs.close();
                    if (stmt != null)
                        stmt.close();
                } catch (SQLException e) {
                    e.printStackTrace();
                }
            }

            /* This is an overly complicated formula to figure out the best 
             * chunk size possible. 
             * 
             * First we determine the idealChunkSize for the amount of rows
             * we are dealing with, based on a linear step equation:
             *10000|   ______
             * 9500|   :
             *     |  /:
             *     | / :
             * 500 |/  :
             * ____|___:_____
             *     0   500,000
             *          
             * In other words, the minimum chunk is 500. As the number of rows 
             * increases, the chunk size grows up to 9500. But after 500000 
             * rows, the chunk size jumps to 10000 and stays constant so that 
             * we don't overload each thread. Therefore, the chunk size is 
             * always between 500 and 10000. 
             * 
             * Secondly, the identifier spread is calculated and multiplied by 
             * the idealChunkSize to get the final chunkSize. If the ids are 
             * equal to the row numbers, the spread is 1 and the chunk size is 
             * ok. If, however, the id space is gigantic, then the chunk size 
             * will be increased proportionally to the average distance between
             * ids (assuming the ids are uniformly distributed).
             *  
             * This actually works perfectly only if the ids ARE uniformly
             * distributed. In other corner cases, where the ids are clustered
             * together within a huge id space, the id space must be
             * partitioned recursively. 
             */
            final float idealChunkSize = (numRows > 500000) ? 10000 : .018f * numRows + 500;
            final float spread = (float) (maxId - minId + 1) / (float) numRows;
            final long chunkSize = Math.round(idealChunkSize * spread);

            System.out.println("Processing " + entity + " (" + entity.getTableName() + ") rows(" + numRows
                    + ") range(" + minId + "," + maxId + ") parallel(" + entity.isParallelLoadable() + ")");
            System.out.println("Parameters: spread(" + spread + ") chunkSize(ideal=" + idealChunkSize
                    + " actual=" + chunkSize + ")");

            final Map<BatchUpdate, Future<Boolean>> futures = new HashMap<BatchUpdate, Future<Boolean>>();
            final Queue<BatchUpdate> updates = new LinkedList<BatchUpdate>();

            // start each chunk as a task on the executor
            for (long i = minId; i <= maxId; i += chunkSize) {
                BatchUpdate update = new BatchUpdate(dataSource, hiFactory, entity, i, i + chunkSize - 1);
                updates.add(update);

                Future<Boolean> future = entity.isParallelLoadable() ? parallelExecutor.submit(update)
                        : serialExecutor.submit(update);

                futures.put(update, future);
            }

            // wait for all updates to finish
            while (!updates.isEmpty()) {
                final BatchUpdate update = updates.remove();
                final Future<Boolean> future = futures.remove(update);
                try {
                    // this get() blocks until the future is available
                    Boolean success = future.get();
                    if (success == null || !success.booleanValue()) {
                        System.err.println("FAILED: " + update);
                    } else {
                        int n = update.getNumUpdated();
                        if (n == 0) {
                            System.out.println("  done " + update + " (no rows found)");
                        } else {
                            int ut = (int) update.getAverageUpdateTime();
                            int ht = (int) update.getAverageHandleTime();
                            System.out.println("  done " + update + " rows(" + n + " rows) avg(handle=" + ht
                                    + "ms, update=" + ut + "ms)");
                        }
                    }
                } catch (ExecutionException e) {
                    System.err.println("Updated failed for entity: " + entity);
                    e.printStackTrace();
                } catch (InterruptedException e) {
                    System.err.println("Updated failed for entity: " + entity);
                    e.printStackTrace();
                }
            }

            float time = System.currentTimeMillis() - start;
            System.out.println("Done " + entity + " (" + (time / 1000) + " sec)\n");
            benchmarkFile.write(entity.getClassName() + "\t" + numRows + "\t" + time + "\n");
            benchmarkFile.flush();
        }

    } finally {
        try {
            if (conn != null)
                conn.close();
            if (benchmarkFile != null)
                benchmarkFile.close();
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    // Done 
    parallelExecutor.shutdown();
    serialExecutor.shutdown();
}
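
To make the chunk-size heuristic described in the long comment above concrete, here is a quick worked computation with made-up numbers (a sketch, not part of the original source):

// Hypothetical input: 200,000 rows whose ids span 1..1,000,000
long numRows = 200_000;
long minId = 1, maxId = 1_000_000;

// Below 500,000 rows the ideal chunk grows linearly from 500 toward 9,500
float idealChunkSize = (numRows > 500000) ? 10000 : .018f * numRows + 500; // ~4,100
// spread = average gap between consecutive ids, assuming a uniform distribution
float spread = (float) (maxId - minId + 1) / (float) numRows;              // 5.0
long chunkSize = Math.round(idealChunkSize * spread);                      // ~20,500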

From source file:com.linkedin.pinot.core.startree.OffHeapStarTreeBuilder.java

/**
 * Helper method that visits each leaf node and does the following:
 * - Re-orders the doc-ids corresponding to the leaf node with respect to the time column.
 * - Creates child nodes for each time value under this leaf node.
 * - Adds a new record with aggregated data for this leaf node.
 * @throws Exception
 */
private void splitLeafNodesOnTimeColumn() throws Exception {
    Queue<StarTreeIndexNode> nodes = new LinkedList<>();
    nodes.add(starTreeRootIndexNode);
    StarTreeDataSorter dataSorter = new StarTreeDataSorter(dataFile, dimensionSizeBytes, metricSizeBytes);
    while (!nodes.isEmpty()) {
        StarTreeIndexNode node = nodes.remove();
        if (node.isLeaf()) {
            // If we have time column, split on time column, helps in time based filtering
            if (timeColumnName != null) {
                int level = node.getLevel();
                int[] newSortOrder = moveColumnInSortOrder(timeColumnName, getSortOrder(), level);

                int startDocId = node.getStartDocumentId();
                int endDocId = node.getEndDocumentId();
                dataSorter.sort(startDocId, endDocId, newSortOrder);
                int timeColIndex = dimensionNameToIndexMap.get(timeColumnName);
                Map<Integer, IntPair> timeColumnRangeMap = dataSorter.groupByIntColumnCount(startDocId,
                        endDocId, timeColIndex);

                node.setChildDimensionName(timeColIndex);
                node.setChildren(new HashMap<Integer, StarTreeIndexNode>());

                for (int timeValue : timeColumnRangeMap.keySet()) {
                    IntPair range = timeColumnRangeMap.get(timeValue);
                    StarTreeIndexNode child = new StarTreeIndexNode();
                    child.setDimensionName(timeColIndex);
                    child.setDimensionValue(timeValue);
                    child.setParent(node);
                    child.setLevel(node.getLevel() + 1);
                    child.setStartDocumentId(range.getLeft());
                    child.setEndDocumentId(range.getRight());
                    node.addChild(child, timeValue);
                }
            }
        } else {
            Iterator<StarTreeIndexNode> childrenIterator = node.getChildrenIterator();
            while (childrenIterator.hasNext()) {
                nodes.add(childrenIterator.next());
            }
        }
    }
    dataSorter.close();
}

From source file:nl.b3p.viewer.config.services.WMSService.java

/**
 * Update the tree structure of Layers by following the tree structure and
 * setting the parent and children accordingly. Reuses entities for layers
 * which are UNMODIFIED or UPDATED and inserts new entities for NEW layers.
 * <p>
 * Because virtual layers with null name cannot be updated, those are always
 * recreated and user set properties are lost, except those set on the top
 * layer which are preserved.
 * <p>
 * Interface should disallow setting user properties (especially authorizations)
 * on virtual layers.
 */
private void updateLayerTree(final WMSService update, final UpdateResult result) {

    Layer newTopLayer;

    String topLayerName = update.getTopLayer().getName();
    if (topLayerName == null) {
        // Start with a new no name topLayer
        newTopLayer = update.getTopLayer().pluckCopy();
    } else {
        // Old persistent top layer or new plucked copy from updated service
        newTopLayer = result.getLayerStatus().get(topLayerName).getLeft();
    }

    // Copy user set stuff over from old toplayer, even if name was changed
    // or topLayer has no name
    newTopLayer.copyUserModifiedProperties(getTopLayer());

    newTopLayer.setParent(null);
    newTopLayer.setService(this);
    newTopLayer.getChildren().clear();
    setTopLayer(newTopLayer);

    // Do a breadth-first traversal to set the parent and fill the children
    // list of all layers.
    // For the breadth-first traversal save layers from updated service to
    // visit with their (possibly persistent) parent layers from this service

    // XXX why did we need BFS?

    Queue<Pair<Layer, Layer>> q = new LinkedList();

    // Start at children of topLayer from updated service, topLayer handled
    // above
    for (Layer child : update.getTopLayer().getChildren()) {
        q.add(new ImmutablePair(child, newTopLayer));
    }

    Set<String> visitedLayerNames = new HashSet();

    do {
        // Remove from head of queue
        Pair<Layer, Layer> p = q.remove();

        Layer updateLayer = p.getLeft(); // layer from updated service
        Layer parent = p.getRight(); // parent layer from this

        Layer thisLayer;
        String layerName = updateLayer.getName();
        if (layerName == null) {
            // 'New' no-name layer - we can't possibly guess whether it is
            // the same as an already existing no-name layer, so always
            // create a new entity
            thisLayer = updateLayer.pluckCopy();
        } else {

            if (visitedLayerNames.contains(layerName)) {
                // Duplicate layer in updated service -- ignore this one
                thisLayer = null;
            } else {
                // Find possibly already persistent updated layer
                // (depth first) - if new already a pluckCopy()
                thisLayer = result.getLayerStatus().get(layerName).getLeft();
                visitedLayerNames.add(layerName);
            }
        }

        if (thisLayer != null) {
            thisLayer.setService(this);
            thisLayer.setParent(parent);
            parent.getChildren().add(thisLayer);
        }

        for (Layer child : updateLayer.getChildren()) {
            // Add at end of queue
            q.add(new ImmutablePair(child, thisLayer));
        }
    } while (!q.isEmpty());
}

From source file:edu.snu.leader.hidden.SimulationState.java

/**
 * Signals that the specified individual has canceled a group movement
 *
 * @param individual
 */
public void cancelInitiation(SpatialIndividual individual) {
    if (_LOG.isDebugEnabled()) {
        _LOG.debug("Before cancel [" + individual.getID() + "]: eligibleInitiators=["
                + _eligibleInitiators.size() + "] remaining=[" + _remaining.size() + "] totalFollowers=["
                + individual.getTotalFollowerCount() + "]");
    }

    // Send it a signal so it can log some information
    individual.signalInitiationFailure(this);

    // We need to maintain a list of all the affected individuals
    List<SpatialIndividual> affected = new LinkedList<SpatialIndividual>();

    // Build the list starting with the initiator itself
    Queue<SpatialIndividual> indsToProcess = new LinkedList<SpatialIndividual>();
    indsToProcess.add(individual);
    while (!indsToProcess.isEmpty()) {
        // Get the first in the queue
        SpatialIndividual current = indsToProcess.remove();

        //            _LOG.debug( "Processing ["
        //                    + current.getID()
        //                    + "]" );

        // Add it to the list
        affected.add(current);

        // Add its immediate followers to the queue for processing
        Iterator<Neighbor> followerIter = current.getFollowers().iterator();
        while (followerIter.hasNext()) {
            indsToProcess.add(followerIter.next().getIndividual());
        }
    }

    /* Iterate through all the affected individuals to change them from
     * departed to remaining and tell them to cancel */
    Iterator<SpatialIndividual> affectedIter = affected.iterator();
    while (affectedIter.hasNext()) {
        SpatialIndividual current = affectedIter.next();

        //            _LOG.debug( "Processing affected ["
        //                    + current.getID()
        //                    + "]" );

        // Remove the individual from the departed group
        _departed.remove(current.getID());

        // Add it to the remaining group
        _remaining.put(current.getID(), current);

        // Tell it to cancel
        current.cancel();

    }

    /* Iterate through the list again to see if they are eligible
     * initiators.  We couldn't do it during the last pass through since
     * we hadn't cleaned up all the groups yet. */
    //        affectedIter = affected.iterator();
    affectedIter = _remaining.values().iterator();
    while (affectedIter.hasNext()) {
        SpatialIndividual current = affectedIter.next();

        // Are any of the individual's neighbors initiators or followers?
        boolean eligible = true;
        Iterator<Neighbor> neighborIter = current.getNearestNeighbors().iterator();
        while (eligible && neighborIter.hasNext()) {
            // Can tell by looking at the group ID
            Neighbor neighbor = neighborIter.next();
            if (null != neighbor.getIndividual().getGroupID()) {
                /* The neighbor belongs to a group, so the individual is NOT
                 * eligible. */
                eligible = false;
            }
        }

        // Is the individual eligible?
        if (eligible) {
            // Yup
            _eligibleInitiators.put(current.getID(), current);
        } else {
            // Nope, tell them who their first mover was
            // Iterate through the list of departed individuals and
            // find the first nearest neighbor
            Iterator<SpatialIndividual> departedIter = _departed.values().iterator();
            while (departedIter.hasNext()) {
                SpatialIndividual departedInd = departedIter.next();
                if (current.isNearestNeighbor(departedInd)) {
                    current.observeFirstMover(departedInd);
                    break;
                }
            }
        }

        /* Check all the individuals not yet departed to see if they
         * observed this individual as a first mover.  If so, reset their
         * first mover if no other neighbors have departed or if another
         * has departed, set it to that neighbor */
        Iterator<SpatialIndividual> remainingIter = _remaining.values().iterator();
        while (remainingIter.hasNext()) {
            SpatialIndividual currentRemaining = remainingIter.next();
            Neighbor firstMover = currentRemaining.getFirstMover();
            if ((null != firstMover) && (firstMover.getIndividual().getID().equals(current.getID()))) {
                // Reset the first mover
                currentRemaining.resetFirstMover();

                // See if they now have another first mover
                Iterator<SpatialIndividual> departedIter = _departed.values().iterator();
                while (departedIter.hasNext()) {
                    SpatialIndividual departedInd = departedIter.next();
                    if (currentRemaining.isNearestNeighbor(departedInd)) {
                        currentRemaining.observeFirstMover(departedInd);
                        break;
                    }
                }
            }
        }
    }

    _LOG.debug("After cancel: eligibleInitiators=[" + _eligibleInitiators.size() + "] remaining=["
            + _remaining.size() + "]");
}

From source file:bwem.map.MapImpl.java

public TilePosition breadthFirstSearch(TilePosition start, Pred findCond, Pred visitCond, boolean connect8) {
    if (findCond.isTrue(getData().getTile(start), start, this)) {
        return start;
    }

    final Set<TilePosition> visited = new TreeSet<>((a, b) -> {
        int result = Integer.compare(a.getX(), b.getX());
        if (result != 0) {
            return result;
        }
        return Integer.compare(a.getY(), b.getY());
    });
    Queue<TilePosition> toVisit = new ArrayDeque<>();

    toVisit.add(start);
    visited.add(start);

    TilePosition[] dir8 = { new TilePosition(-1, -1), new TilePosition(0, -1), new TilePosition(1, -1),
            new TilePosition(-1, 0), new TilePosition(1, 0), new TilePosition(-1, 1), new TilePosition(0, 1),
            new TilePosition(1, 1) };
    TilePosition[] dir4 = { new TilePosition(0, -1), new TilePosition(-1, 0), new TilePosition(+1, 0),
            new TilePosition(0, +1) };
    TilePosition[] directions = connect8 ? dir8 : dir4;

    while (!toVisit.isEmpty()) {
        TilePosition current = toVisit.remove();
        for (TilePosition delta : directions) {
            TilePosition next = current.add(delta);
            if (getData().getMapData().isValid(next)) {
                Tile nextTile = getData().getTile(next, CheckMode.NO_CHECK);
                if (findCond.isTrue(nextTile, next, this)) {
                    return next;
                }
                if (visitCond.isTrue(nextTile, next, this) && !visited.contains(next)) {
                    toVisit.add(next);
                    visited.add(next);
                }
            }
        }
    }

    //TODO: Are we supposed to return start or not?
    //        bwem_assert(false);
    throw new IllegalStateException();
    //        return start;
}