Example usage for java.util Queue remove

List of usage examples for java.util Queue remove

Introduction

On this page you can find usage examples for java.util Queue remove.

Prototype

E remove();

Document

Retrieves and removes the head of this queue. This method differs from poll() only in that it throws an exception if this queue is empty.
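
For reference, here is a minimal standalone sketch (not taken from any of the sources below) demonstrating this behavior: remove() returns elements in FIFO order and throws a NoSuchElementException on an empty queue, where poll() would return null.

import java.util.LinkedList;
import java.util.NoSuchElementException;
import java.util.Queue;

public class QueueRemoveDemo {
    public static void main(String[] args) {
        Queue<String> queue = new LinkedList<>();
        queue.add("first");
        queue.add("second");

        // remove() retrieves and removes the head of the queue (FIFO order)
        System.out.println(queue.remove()); // prints "first"
        System.out.println(queue.remove()); // prints "second"

        // Unlike poll(), which returns null on an empty queue, remove() throws
        try {
            queue.remove();
        } catch (NoSuchElementException e) {
            System.out.println("Queue was empty");
        }
    }
}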

Usage

From source file:de.huberlin.wbi.hiway.logstats.LogParser.java

private void expandHiwayEvents() throws JSONException {
    Queue<JsonReportEntry> execQ = new LinkedList<>();
    Map<String, JsonReportEntry> allocatedMap = new HashMap<>();
    Queue<JsonReportEntry> completedQ = new LinkedList<>();

    for (JsonReportEntry entry : entries) {
        switch (entry.getKey()) {
        case JsonReportEntry.KEY_INVOC_EXEC:
            execQ.add(entry);
            break;
        case JsonReportEntry.KEY_INVOC_TIME:
            JsonReportEntry completed = completedQ.remove();
            JSONObject value = completed.getValueJsonObj();
            JsonReportEntry allocated = allocatedMap.get(value.getString("container-id"));
            expandEntry(completed, entry);
            expandEntry(allocated, entry);
            break;
        case HiwayDBI.KEY_HIWAY_EVENT:
            value = entry.getValueJsonObj();
            switch (value.getString("type")) {
            case "container-requested":
                expandEntry(entry, execQ.remove());
                break;
            case "container-allocated":
                allocatedMap.put(value.getString("container-id"), entry);
                break;
            case "container-completed":
                completedQ.add(entry);
                break;
            default:
            }
            break;
        default:
        }
    }
}

From source file:it.scoppelletti.mobilepower.app.FragmentLayoutController.java

/**
 * Rebuilds the sequence of fragments in the single panel.
 *
 * @param  fragmentMgr   Fragment manager.
 * @param  fragmentQueue Fragments.
 * @return               Identifier of the last element pushed
 *                       onto the back stack.
 */
private int arrangePanel(FragmentManager fragmentMgr,
        Queue<FragmentLayoutController.FragmentEntry> fragmentQueue) {
    int tnId, lastTnId;
    FragmentLayoutController.FragmentEntry entry;
    FragmentTransaction fragmentTn = null;

    lastTnId = -1;
    while (!fragmentQueue.isEmpty()) {
        tnId = -1;
        entry = fragmentQueue.remove();

        try {
            fragmentTn = fragmentMgr.beginTransaction();

            fragmentTn.replace(myFrameIds[0], entry.getFragment().asFragment(), entry.getTag());

            fragmentTn.addToBackStack(null);
        } finally {
            if (fragmentTn != null) {
                tnId = fragmentTn.commit();
                fragmentTn = null;
            }
        }

        if (tnId >= 0) {
            lastTnId = tnId;
        }
    }

    return lastTnId;
}

From source file:it.geosolutions.geobatch.actions.ds2ds.Ds2dsAction.java

/**
 * Imports data from the source DataStore to the output one,
 * transforming the data as configured.
 */
@Override
public Queue<EventObject> execute(Queue<EventObject> events) throws ActionException {

    // return object
    final Queue<EventObject> outputEvents = new LinkedList<EventObject>();

    while (events.size() > 0) {
        final EventObject ev;
        try {
            if ((ev = events.remove()) != null) {
                listenerForwarder.started();

                updateTask("Working on incoming event: " + ev.getSource());

                Queue<FileSystemEvent> acceptableFiles = acceptableFiles(unpackCompressedFiles(ev));
                if (ev instanceof FileSystemEvent
                        && ((FileSystemEvent) ev).getEventType().equals(FileSystemEventType.POLLING_EVENT)) {
                    String fileType = getFileType((FileSystemEvent) ev);
                    EventObject output = null;
                    if ("feature".equalsIgnoreCase(fileType)) {
                        configuration.getOutputFeature().setTypeName(
                                FilenameUtils.getBaseName(((FileSystemEvent) ev).getSource().getName()));
                        output = buildOutputEvent();
                        updateImportProgress(1, 1, "Completed");
                    } else {
                        output = importFile((FileSystemEvent) ev);
                    }

                    outputEvents.add(output);
                } else {
                    if (acceptableFiles.size() == 0) {
                        failAction("No file to process");
                    } else {
                        List<ActionException> exceptions = new ArrayList<ActionException>();
                        for (FileSystemEvent fileEvent : acceptableFiles) {
                            try {
                                String fileType = getFileType(fileEvent);
                                EventObject output = null;
                                if ("feature".equalsIgnoreCase(fileType)) {
                                    configuration.getOutputFeature().setTypeName(FilenameUtils
                                            .getBaseName(fileEvent.getSource().getName()));
                                    output = buildOutputEvent();
                                    updateImportProgress(1, 1, "Completed");
                                } else {
                                    output = importFile(fileEvent);
                                }
                                if (output != null) {
                                    // add the event to the return
                                    outputEvents.add(output);
                                } else {
                                    if (LOGGER.isWarnEnabled()) {
                                        LOGGER.warn("No output produced");
                                    }
                                }
                            } catch (ActionException e) {
                                exceptions.add(e);
                            }

                        }
                        if (acceptableFiles.size() == exceptions.size()) {
                            throw new ActionException(this, exceptions.get(0).getMessage());
                        } else if (exceptions.size() > 0) {
                            if (LOGGER.isWarnEnabled()) {
                                for (ActionException ex : exceptions) {
                                    LOGGER.warn("Error in action: " + ex.getMessage());
                                }
                            }
                        }
                    }
                }

            } else {
                if (LOGGER.isErrorEnabled()) {
                    LOGGER.error("Encountered a NULL event: SKIPPING...");
                }
                continue;
            }
        } catch (ActionException ioe) {
            failAction("Unable to produce the output, " + ioe.getLocalizedMessage(), ioe);
        } catch (Exception ioe) {
            failAction("Unable to produce the output: " + ioe.getLocalizedMessage(), ioe);
        }
    }
    return outputEvents;
}

From source file:com.liveramp.hank.partition_server.UpdateManager.java

@Override
public void update() throws IOException {
    HankTimer timer = new HankTimer();
    try {

        // Delete unknown files
        deleteUnknownFiles();
        // Perform update
        Semaphore concurrentUpdatesSemaphore = new Semaphore(configurator.getNumConcurrentUpdates());
        List<Throwable> encounteredThrowables = new ArrayList<Throwable>();
        PartitionUpdateTaskStatisticsAggregator partitionUpdateTaskStatisticsAggregator = new PartitionUpdateTaskStatisticsAggregator();
        Map<String, Queue<PartitionUpdateTask>> dataDirectoryToUpdateTasks = new HashMap<String, Queue<PartitionUpdateTask>>();
        List<PartitionUpdateTask> allUpdateTasks = buildPartitionUpdateTasks(
                partitionUpdateTaskStatisticsAggregator, encounteredThrowables);
        // Build and organize update tasks per data directory
        for (PartitionUpdateTask updateTask : allUpdateTasks) {
            String dataDirectory = updateTask.getDataDirectory();
            Queue<PartitionUpdateTask> updateTasks = dataDirectoryToUpdateTasks.get(dataDirectory);
            if (updateTasks == null) {
                updateTasks = new LinkedList<PartitionUpdateTask>();
                dataDirectoryToUpdateTasks.put(dataDirectory, updateTasks);
            }
            updateTasks.add(updateTask);
        }

        // Logging
        LOG.info("Number of update tasks: " + allUpdateTasks.size());
        for (Map.Entry<String, Queue<PartitionUpdateTask>> entry : dataDirectoryToUpdateTasks.entrySet()) {
            LOG.info("Number of update tasks scheduled in " + entry.getKey() + ": " + entry.getValue().size());
        }

        // Build executor services
        Map<String, ExecutorService> dataDirectoryToExecutorService = new HashMap<String, ExecutorService>();
        for (String dataDirectory : dataDirectoryToUpdateTasks.keySet()) {
            dataDirectoryToExecutorService.put(dataDirectory,
                    new UpdateThreadPoolExecutor(configurator.getMaxConcurrentUpdatesPerDataDirectory(),
                            new UpdaterThreadFactory(dataDirectory), concurrentUpdatesSemaphore));
        }

        LOG.info("Submitting update tasks for " + dataDirectoryToUpdateTasks.size() + " directories.");

        // Execute tasks. We execute one task for each data directory and loop around so that the tasks
        // attempt to acquire the semaphore in a reasonable order.
        boolean remaining = true;
        while (remaining) {
            remaining = false;
            for (Map.Entry<String, Queue<PartitionUpdateTask>> entry : dataDirectoryToUpdateTasks.entrySet()) {
                // Pop next task
                Queue<PartitionUpdateTask> partitionUpdateTasks = entry.getValue();
                if (!partitionUpdateTasks.isEmpty()) {
                    PartitionUpdateTask partitionUpdateTask = partitionUpdateTasks.remove();
                    // Execute task
                    dataDirectoryToExecutorService.get(entry.getKey()).execute(partitionUpdateTask);
                }
                if (!partitionUpdateTasks.isEmpty()) {
                    remaining = true;
                }
            }
        }

        LOG.info("All update tasks submitted, shutting down executor services");

        // Shutdown executors
        for (ExecutorService executorService : dataDirectoryToExecutorService.values()) {
            executorService.shutdown();
        }

        LOG.info("Waiting for executors to finish.");

        // Wait for executors to finish
        for (Map.Entry<String, ExecutorService> entry : dataDirectoryToExecutorService.entrySet()) {
            String directory = entry.getKey();
            ExecutorService executorService = entry.getValue();

            boolean keepWaiting = true;
            while (keepWaiting) {
                try {
                    LOG.info("Waiting for updates to complete on data directory: " + directory);
                    boolean terminated = executorService.awaitTermination(
                            UPDATE_EXECUTOR_TERMINATION_CHECK_TIMEOUT_VALUE,
                            UPDATE_EXECUTOR_TERMINATION_CHECK_TIMEOUT_UNIT);
                    if (terminated) {
                        // We finished executing all tasks
                        LOG.info("Finished updates for directory: " + directory);
                        keepWaiting = false;
                    }
                    // Otherwise, the timeout elapsed and the current thread was not interrupted: keep waiting.
                    // Record update ETA
                    Hosts.setUpdateETA(host, partitionUpdateTaskStatisticsAggregator.computeETA());
                } catch (InterruptedException e) {
                    // Received interruption (stop request).
                    // Swallow the interrupted state and ask the executor to shutdown immediately. Also, keep waiting.
                    LOG.info(
                            "The update manager was interrupted. Stopping the update process (stop executing new partition update tasks"
                                    + " and wait for those that were running to finish).");
                    // Shutdown all executors
                    for (ExecutorService otherExecutorService : dataDirectoryToExecutorService.values()) {
                        otherExecutorService.shutdownNow();
                    }
                    // Record failed update exception (we need to keep waiting)
                    encounteredThrowables.add(
                            new IOException("Failed to complete update: update interruption was requested."));
                }
            }
        }

        LOG.info("All executors have finished updates");

        // Shutdown all executors
        for (ExecutorService executorService : dataDirectoryToExecutorService.values()) {
            executorService.shutdownNow();
        }

        LOG.info("Finished with " + encounteredThrowables.size() + " errors.");

        // Detect failures
        if (!encounteredThrowables.isEmpty()) {
            LOG.error(String.format("%d exceptions encountered while running partition update tasks:",
                    encounteredThrowables.size()));
            int i = 0;
            for (Throwable t : encounteredThrowables) {
                LOG.error(String.format("Exception %d/%d:", ++i, encounteredThrowables.size()), t);
            }
            throw new IOException(String.format(
                    "Failed to complete update: %d exceptions encountered while running partition update tasks.",
                    encounteredThrowables.size()));
        }

        // Garbage collect useless host domains
        garbageCollectHostDomains(host);

        // Log statistics
        partitionUpdateTaskStatisticsAggregator.logStats();

    } catch (IOException e) {
        LOG.info("Update failed and took " + FormatUtils.formatSecondsDuration(timer.getDurationMs() / 1000));
        throw e;
    }
    LOG.info("Update succeeded and took " + FormatUtils.formatSecondsDuration(timer.getDurationMs() / 1000));
}

From source file:it.geosolutions.geobatch.actions.commons.CollectorAction.java

/**
 * Removes TemplateModelEvents from the queue and puts the collected
 * files into the output queue as FILE_ADDED events.
 */
public Queue<EventObject> execute(Queue<EventObject> events) throws ActionException {

    listenerForwarder.started();
    listenerForwarder.setTask("build the output absolute file name");

    // return
    final Queue<EventObject> ret = new LinkedList<EventObject>();

    listenerForwarder.setTask("Building/getting the root data structure");

    if (conf.getWildcard() == null) {
        LOGGER.warn("Null wildcard: using default\'*\'");
        conf.setWildcard("*");
    }

    it.geosolutions.tools.io.file.Collector collector = new it.geosolutions.tools.io.file.Collector(
            new WildcardFileFilter(conf.getWildcard(), IOCase.INSENSITIVE), conf.getDeep());
    while (!events.isEmpty()) {

        final EventObject event = events.remove();
        if (event == null) {
            // TODO LOG
            continue;
        }
        File source = null;
        if (event.getSource() instanceof File) {
            source = ((File) event.getSource());
        }

        if (source == null || !source.exists()) {
            // LOG
            continue;
        }
        listenerForwarder.setTask("Collecting from" + source);

        List<File> files = collector.collect(source);
        if (files == null) {
            return ret;
        }
        for (File file : files) {
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug("Collected file: " + file);
            }
            ret.add(new FileSystemEvent(file, FileSystemEventType.FILE_ADDED));
        }

    }
    listenerForwarder.completed();
    return ret;
}

From source file:it.geosolutions.geobatch.opensdi.ndvi.NDVIStatsAction.java

/**
 * Executes the process.
 */
public Queue<EventObject> execute(Queue<EventObject> events) throws ActionException {

    // return object
    final Queue<EventObject> ret = new LinkedList<EventObject>();

    while (events.size() > 0) {
        final EventObject ev;
        try {
            if ((ev = events.remove()) != null) {
                if (LOGGER.isTraceEnabled()) {
                    LOGGER.trace("Working on incoming event: " + ev.getSource());
                }
                if (ev instanceof FileSystemEvent) {
                    FileSystemEvent fileEvent = (FileSystemEvent) ev;
                    File file = fileEvent.getSource();
                    processXMLFile(file);
                }

                // add the event to the return
                ret.add(ev);

            } else {
                if (LOGGER.isErrorEnabled()) {
                    LOGGER.error("Encountered a NULL event: SKIPPING...");
                }
                continue;
            }
        } catch (Exception ioe) {
            final String message = "Unable to produce the output: " + ioe.getLocalizedMessage();
            if (LOGGER.isErrorEnabled())
                LOGGER.error(message, ioe);

            throw new ActionException(this, message);
        }
    }
    return ret;
}

From source file:it.geosolutions.geobatch.unredd.script.ingestion.IngestionAction.java

/**
 * Main loop over the input files.
 * Single-file processing is delegated to execute(File inputZipFile).
 */
public Queue<FileSystemEvent> execute(Queue<FileSystemEvent> events) throws ActionException {

    final Queue<FileSystemEvent> ret = new LinkedList<FileSystemEvent>();
    LOGGER.warn("Ingestion flow running");

    while (!events.isEmpty()) {
        final FileSystemEvent ev = events.remove();

        try {
            if (ev != null) {
                if (LOGGER.isTraceEnabled()) {
                    LOGGER.trace("Processing incoming event: " + ev.getSource());
                }

                File inputZipFile = ev.getSource(); // this is the input zip file
                File out = execute(inputZipFile);
                ret.add(new FileSystemEvent(out, FileSystemEventType.FILE_ADDED));

            } else {
                LOGGER.error("NULL event: skipping...");
                continue;
            }

        } catch (ActionException ex) { // ActionEx have already been processed
            LOGGER.error(ex.getMessage(), ex);
            throw ex;

        } catch (Exception ex) {
            final String message = "GeostoreAction.execute(): Unable to produce the output: "
                    + ex.getLocalizedMessage();
            LOGGER.error(message, ex);
            throw new ActionException(this, message);
        }
    }

    return ret;
}

From source file:net.sf.nmedit.jpatch.impl.PBasicConnectionManager.java

public Collection<PConnector> graph(PConnector c) {
    Node n = nodemap.get(c);
    if (n == null)
        return Collections.<PConnector>emptyList();

    Queue<Node> queue = new LinkedList<Node>();
    Collection<PConnector> g = new LinkedList<PConnector>();
    queue.offer(n.root());
    while (!queue.isEmpty()) {
        n = queue.remove();
        g.add(n.c);
        n.addChildNodes(queue);
    }
    return Collections.<PConnector>unmodifiableCollection(g);
}

From source file:net.sf.nmedit.jpatch.impl.PBasicConnectionManager.java

public Collection<PConnection> graphConnections(PConnector c) {
    Node n = nodemap.get(c);
    if (n == null)
        return Collections.<PConnection>emptyList();

    Queue<Node> queue = new LinkedList<Node>();
    Collection<PConnection> g = new LinkedList<PConnection>();
    n.addChildNodes(queue);
    while (!queue.isEmpty()) {
        n = queue.remove();
        g.add(new PConnection(n.c, n.parent()));
        n.addChildNodes(queue);
    }
    return Collections.<PConnection>unmodifiableCollection(g);
}

From source file:Graph.java

public void bfs(Queue<Node> q) {
    clearState();
    for (Node n : nodes) {
        n.setState(q.contains(n) ? GRAY : WHITE);
    }
    for (Edge e : edges) {
        e.setMode(UNKNOWN);
    }
    for (Node n : q) {
        n.setDistance(0);
        n.setPredecessor(null);
    }
    while (!q.isEmpty()) {
        Node n = q.remove();
        List<Node> out = findNextNodes(n);
        for (Node m : out) {
            Edge e = findEdge(n, m);
            if (e != null) {
                if (m.getState() == WHITE) {
                    e.setMode(TREE);
                } else if (m.getState() == GRAY) {
                    e.setMode(BACK);
                }
            }
            if (!m.isVisited()) {
                m.setDistance(n.getDistance() + 1);
                m.setPredecessor(n);
                m.setState(GRAY);
                q.offer(m);
            }
        }
        n.setState(BLACK);
    }
    searchState = STATE_BFS;
}