Example usage for java.util Queue isEmpty

List of usage examples for java.util Queue isEmpty

Introduction

On this page you can find example usage for java.util.Queue.isEmpty.

Prototype

boolean isEmpty();

Document

Returns true if this collection contains no elements.
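
As a minimal, self-contained sketch (not taken from any of the projects below), isEmpty is typically used as the loop guard when draining a queue:

import java.util.LinkedList;
import java.util.Queue;

public class QueueIsEmptyExample {
    public static void main(String[] args) {
        Queue<String> queue = new LinkedList<>();
        queue.offer("first");
        queue.offer("second");

        // isEmpty() returns true once every element has been polled
        while (!queue.isEmpty()) {
            System.out.println(queue.poll());
        }

        System.out.println("empty: " + queue.isEmpty()); // prints "empty: true"
    }
}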

Usage

From source file:org.mobicents.servlet.restcomm.ussd.interpreter.UssdInterpreter.java

private StringBuffer processUssdMessageTags(Queue<Tag> messageTags) {
    StringBuffer message = new StringBuffer();
    while (!messageTags.isEmpty()) {
        Tag tag = messageTags.poll();
        if (tag != null) {
            message.append(tag.text());
            if (!messageTags.isEmpty())
                message.append("\n");
        } else {
            return message;
        }
    }
    return message;
}

From source file:net.sf.eventgraphj.centrality.EgoNetworkBetweennessCentrality.java

protected void computeBetweenness(Queue<V> queue, Transformer<E, ? extends Number> edge_weights) {
    for (V v : graph.getVertices()) {
        // initialize the betweenness data for this new vertex
        for (V s : graph.getVertices())
            this.vertex_data.put(s, new BetweennessData());

        //         if (v.equals(new Integer(0)))
        //            System.out.println("pause");

        vertex_data.get(v).numSPs = 1;
        vertex_data.get(v).distance = 0;

        Stack<V> stack = new Stack<V>();
        //            Buffer<V> queue = new UnboundedFifoBuffer<V>();
        //            queue.add(v);
        queue.offer(v);

        while (!queue.isEmpty()) {
            //                V w = queue.remove();
            V w = queue.poll();
            stack.push(w);
            BetweennessData w_data = vertex_data.get(w);
            for (E e : graph.getOutEdges(w)) {
                // TODO (jrtom): change this to getOtherVertices(w, e)
                V x = graph.getOpposite(w, e);
                if (x.equals(w))
                    continue;
                double wx_weight = edge_weights.transform(e).doubleValue();

                //                for(V x : graph.getSuccessors(w)) 
                //                {
                //                   if (x.equals(w))
                //                      continue;

                // FIXME: the other problem is that I need to 
                // keep putting the neighbors of things we've just 
                // discovered in the queue, if they're undiscovered or
                // at greater distance.

                // FIXME: this is the problem, right here, I think: 
                // need to update position in queue if distance changes
                // (which can only happen with weighted edges).
                // for each outgoing edge e from w, get other end x
                // if x not already visited (dist x < 0)
                //   set x's distance to w's dist + edge weight
                //   add x to queue; pri in queue is x's dist
                // if w's dist + edge weight < x's dist 
                //   update x's dist
                //   update x in queue (MapBinaryHeap)
                //   clear x's incoming edge list
                // if w's dist + edge weight = x's dist
                //   add e to x's incoming edge list

                BetweennessData x_data = vertex_data.get(x);
                double x_potential_dist = w_data.distance + wx_weight;
                if (x_potential_dist > this.egoNetworkSize)
                    continue;

                if (x_data.distance < 0) {
                    //                        queue.add(x);
                    //                        vertex_data.get(x).distance = vertex_data.get(w).distance + 1;
                    x_data.distance = x_potential_dist;
                    queue.offer(x);
                }

                // note:
                // (1) this can only happen with weighted edges
                // (2) x's SP count and incoming edges are updated below 
                if (x_data.distance > x_potential_dist) {
                    x_data.distance = x_potential_dist;
                    // invalidate previously identified incoming edges
                    // (we have a new shortest path distance to x)
                    x_data.incomingEdges.clear();
                    // update x's position in queue
                    ((MapBinaryHeap<V>) queue).update(x);
                }
                //                  if (vertex_data.get(x).distance == vertex_data.get(w).distance + 1) 
                // 
                //                    if (x_data.distance == x_potential_dist) 
                //                    {
                //                        x_data.numSPs += w_data.numSPs;
                ////                        vertex_data.get(x).predecessors.add(w);
                //                        x_data.incomingEdges.add(e);
                //                    }
            }
            for (E e : graph.getOutEdges(w)) {
                V x = graph.getOpposite(w, e);
                if (x.equals(w))
                    continue;
                double e_weight = edge_weights.transform(e).doubleValue();
                BetweennessData x_data = vertex_data.get(x);
                double x_potential_dist = w_data.distance + e_weight;
                if (x_data.distance == x_potential_dist) {
                    x_data.numSPs += w_data.numSPs;
                    //                        vertex_data.get(x).predecessors.add(w);
                    x_data.incomingEdges.add(e);
                }
            }
        }
        while (!stack.isEmpty()) {
            V x = stack.pop();

            //              for (V w : vertex_data.get(x).predecessors) 
            for (E e : vertex_data.get(x).incomingEdges) {
                V w = graph.getOpposite(x, e);
                double partialDependency = vertex_data.get(w).numSPs / vertex_data.get(x).numSPs
                        * (1.0 + vertex_data.get(x).dependency);
                vertex_data.get(w).dependency += partialDependency;
                //                  E w_x = graph.findEdge(w, x);
                //                  double w_x_score = edge_scores.get(w_x).doubleValue();
                //                  w_x_score += partialDependency;
                //                  edge_scores.put(w_x, w_x_score);
                double e_score = edge_scores.get(e).doubleValue();
                edge_scores.put(e, e_score + partialDependency);
            }
            if (!x.equals(v)) {
                double x_score = vertex_scores.get(x).doubleValue();
                x_score += vertex_data.get(x).dependency;
                vertex_scores.put(x, x_score);
            }
        }
    }

    if (graph instanceof UndirectedGraph) {
        for (V v : graph.getVertices()) {
            double v_score = vertex_scores.get(v).doubleValue();
            v_score /= 2.0;
            vertex_scores.put(v, v_score);
        }
        for (E e : graph.getEdges()) {
            double e_score = edge_scores.get(e).doubleValue();
            e_score /= 2.0;
            edge_scores.put(e, e_score);
        }
    }

    vertex_data.clear();
}

From source file:at.alladin.rmbt.statisticServer.OpenTestSearchResource.java

/**
 * Gets the JSON response for the histograms.
 * @param whereClause
 * @param searchValues
 * @return JSON as String
 */
private String getHistogram(String whereClause, Queue<Map.Entry<String, FieldType>> searchValues) {
    JSONObject ret = new JSONObject();
    try {
        if (searchValues.isEmpty()) {
            //try getting from cache
            String cacheString = (String) cache.get("opentest-histogram");
            if (cacheString != null) {
                System.out.println("cache hit for histogram");
                return cacheString;
            }
        }

        //Download
        // logarithmic if without filters
        boolean logarithmic = false;
        if (histogramInfo.max_download == Long.MIN_VALUE && histogramInfo.min_download == Long.MIN_VALUE) {

            histogramInfo.max_download = 1;
            histogramInfo.min_download = 0;
            logarithmic = true;
        }
        if (!logarithmic && histogramInfo.max_download == Long.MIN_VALUE) {
            histogramInfo.max_download = HISTOGRAMDOWNLOADDEFAULTMAX;
        }
        if (!logarithmic && histogramInfo.min_download == Long.MIN_VALUE) {
            histogramInfo.min_download = HISTOGRAMDOWNLOADDEFAULTMIN;
        }
        double min = this.histogramInfo.min_download;
        double max = this.histogramInfo.max_download;
        JSONArray downArray = getJSONForHistogram(min, max,
                (logarithmic) ? "speed_download_log" : "speed_download", logarithmic, whereClause,
                searchValues);

        ret.put("download_kbit", downArray);

        // Upload
        logarithmic = false;
        if (histogramInfo.max_upload == Long.MIN_VALUE && histogramInfo.min_upload == Long.MIN_VALUE) {
            histogramInfo.max_upload = 1;
            histogramInfo.min_upload = 0;
            logarithmic = true;
        }
        if (!logarithmic && histogramInfo.max_upload == Long.MIN_VALUE) {
            histogramInfo.max_upload = HISTOGRAMUPLOADDEFAULTMAX;
        }
        if (!logarithmic && histogramInfo.min_upload == Long.MIN_VALUE) {
            histogramInfo.min_upload = HISTOGRAMUPLOADDEFAULTMIN;
        }
        min = this.histogramInfo.min_upload;
        max = this.histogramInfo.max_upload;
        JSONArray upArray = getJSONForHistogram(min, max, (logarithmic) ? "speed_upload_log" : "speed_upload",
                logarithmic, whereClause, searchValues);

        ret.put("upload_kbit", upArray);

        //Ping
        if (histogramInfo.max_ping == Long.MIN_VALUE) {
            histogramInfo.max_ping = HISTOGRAMPINGDEFAULTMAX;
        }
        if (histogramInfo.min_ping == Long.MIN_VALUE) {
            histogramInfo.min_ping = HISTOGRAMPINGDEFAULTMIN;
        }
        min = this.histogramInfo.min_ping;
        max = this.histogramInfo.max_ping;
        JSONArray pingArray = getJSONForHistogram(min, max, "(t.ping_median::float / 1000000)", false,
                whereClause, searchValues);

        ret.put("ping_ms", pingArray);

        if (searchValues.isEmpty()) {
            //if it was the default -> save it to the cache for later
            cache.set("opentest-histogram", CACHE_EXP, ret.toString());
        }

    } catch (JSONException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }
    return ret.toString();
}

From source file:gov.nih.nci.grididloader.BigIdCreator.java

/**
 * Creates Big IDs for each entity and saves them into the database.
 * Each entity is updated in parallel by several threads, but the entities
 * themselves are processed in a serial fashion.
 */
public void createAndUpdate() throws Exception {

    if (hiFactory.getSystemType() == HandleInterfaceType.CLASSIC) {
        // Create site handle, if the database is empty.
        // This is necessary because otherwise 50 threads will try to create it
        // at once, resulting in duplicates and a subsequent avalanche of collisions
        final HandleRepositoryIDInterface idSvc = (HandleRepositoryIDInterface) hiFactory.getHandleInterface();
        // create dummy id (also creates site handle)
        ResourceIdInfo rid = new ResourceIdInfo(new URI("urn://ncicb"), "dummy");
        idSvc.createOrGetGlobalID(rid);
        // remove the id we created, the site handle will remain
        idSvc.removeGlobalID(rid);
    }

    Connection conn = null;
    FileWriter benchmarkFile = null;

    try {
        benchmarkFile = new FileWriter("timings.txt");
        conn = dataSource.getConnection();

        for (BigEntity entity : config.getEntities()) {

            final String className = entity.getClassName();
            if (!classFilter.isEmpty() && ((include && !classFilter.contains(className))
                    || (!include && classFilter.contains(className)))) {
                System.err.println("Filtered out " + className);
                continue;
            }

            long start = System.currentTimeMillis();

            final String table = entity.getTableName();
            final String id = entity.getPrimaryKey();

            Statement stmt = null;
            ResultSet rs = null;
            long numRows = 0;
            long minId = 0;
            long maxId = 0;

            try {
                // get number of rows and id space for the current entity
                stmt = conn.createStatement();
                rs = stmt.executeQuery(
                        "SELECT MIN(" + id + ") minId, MAX(" + id + ") maxId, COUNT(*) rowCount FROM " + table);
                rs.next();
                numRows = rs.getLong("rowCount");
                minId = rs.getLong("minId");
                maxId = rs.getLong("maxId");
            } catch (SQLException e) {
                System.err.println("Error processing " + table);
                e.printStackTrace();
                continue;
            } finally {
                try {
                    if (rs != null)
                        rs.close();
                    if (stmt != null)
                        stmt.close();
                } catch (SQLException e) {
                    e.printStackTrace();
                }
            }

            /* This is an overly complicated formula to figure out the best 
             * chunk size possible. 
             * 
             * First we determine the idealChunkSize for the amount of rows
             * we are dealing with, based on a linear step equation:
             *10000|   ______
             * 9500|   :
             *     |  /:
             *     | / :
             * 500 |/  :
             * ____|___:_____
             *     0   500,000
             *          
             * In other words, the minimum chunk is 500. As the number of rows 
             * increases, the chunk size grows up to 9500. But after 500000 
             * rows, the chunk size jumps to 10000 and stays constant so that 
             * we don't overload each thread. Therefore, the chunk size is 
             * always between 500 and 10000. 
             * 
             * Secondly, the identifier spread is calculated and multiplied by 
             * the idealChunkSize to get the final chunkSize. If the ids are 
             * equal to the row numbers, the spread is 1 and the chunk size is 
             * ok. If, however, the id space is gigantic, then the chunk size 
             * will be increased proportionally to the average distance between
             * ids (assuming the ids are uniformly distributed).
             *
             * This actually works perfectly only if the ids ARE uniformly
             * distributed. In other corner cases, where the ids are clustered
             * together within a huge id space, the id space must be
             * partitioned recursively. 
             */
            final float idealChunkSize = (numRows > 500000) ? 10000 : .018f * numRows + 500;
            final float spread = (float) (maxId - minId + 1) / (float) numRows;
            final long chunkSize = Math.round(idealChunkSize * spread);
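            // Illustrative worked example (an assumption for this page, not part of the original source):
            // numRows = 100,000 gives an idealChunkSize of .018f * 100000 + 500 = 2300;
            // if the ids span 1..200,000, the spread is 200000 / 100000 = 2.0,
            // so the final chunkSize is Math.round(2300 * 2.0) = 4600 ids per chunk.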

            System.out.println("Processing " + entity + " (" + entity.getTableName() + ") rows(" + numRows
                    + ") range(" + minId + "," + maxId + ") parallel(" + entity.isParallelLoadable() + ")");
            System.out.println("Parameters: spread(" + spread + ") chunkSize(ideal=" + idealChunkSize
                    + " actual=" + chunkSize + ")");

            final Map<BatchUpdate, Future<Boolean>> futures = new HashMap<BatchUpdate, Future<Boolean>>();
            final Queue<BatchUpdate> updates = new LinkedList<BatchUpdate>();

            // start each chunk as a task on the executor
            for (long i = minId; i <= maxId; i += chunkSize) {
                BatchUpdate update = new BatchUpdate(dataSource, hiFactory, entity, i, i + chunkSize - 1);
                updates.add(update);

                Future<Boolean> future = entity.isParallelLoadable() ? parallelExecutor.submit(update)
                        : serialExecutor.submit(update);

                futures.put(update, future);
            }

            // wait for all updates to finish
            while (!updates.isEmpty()) {
                final BatchUpdate update = updates.remove();
                final Future<Boolean> future = futures.remove(update);
                try {
                    // this get() blocks until the future is available
                    Boolean success = future.get();
                    if (success == null || !success.booleanValue()) {
                        System.err.println("FAILED: " + update);
                    } else {
                        int n = update.getNumUpdated();
                        if (n == 0) {
                            System.out.println("  done " + update + " (no rows found)");
                        } else {
                            int ut = (int) update.getAverageUpdateTime();
                            int ht = (int) update.getAverageHandleTime();
                            System.out.println("  done " + update + " rows(" + n + " rows) avg(handle=" + ht
                                    + "ms, update=" + ut + "ms)");
                        }
                    }
                } catch (ExecutionException e) {
                    System.err.println("Updated failed for entity: " + entity);
                    e.printStackTrace();
                } catch (InterruptedException e) {
                    System.err.println("Updated failed for entity: " + entity);
                    e.printStackTrace();
                }
            }

            float time = System.currentTimeMillis() - start;
            System.out.println("Done " + entity + " (" + (time / 1000) + " sec)\n");
            benchmarkFile.write(entity.getClassName() + "\t" + numRows + "\t" + time + "\n");
            benchmarkFile.flush();
        }

    } finally {
        try {
            if (conn != null)
                conn.close();
            if (benchmarkFile != null)
                benchmarkFile.close();
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    // Done 
    parallelExecutor.shutdown();
    serialExecutor.shutdown();
}

From source file:it.geosolutions.geobatch.actions.commons.ExtractAction.java

/**
 * Removes the incoming events from the queue, extracts each archive, and adds the
 * resulting output events to the returned queue.
 */
public Queue<EventObject> execute(Queue<EventObject> events) throws ActionException {

    listenerForwarder.started();
    listenerForwarder.setTask("build the output absolute file name");

    // return
    final Queue<EventObject> ret = new LinkedList<EventObject>();

    listenerForwarder.setTask("Building/getting the root data structure");

    boolean extractMultipleFile;
    final int size = events.size();
    if (size == 0) {
        throw new ActionException(this, "Empty file list");
    } else if (size > 1) {
        extractMultipleFile = true;
    } else {
        extractMultipleFile = false;
    }

    final File dest = conf.getDestination();

    if (dest != null && !dest.isDirectory()) {
        if (!dest.mkdirs()) {
            throw new ActionException(this, "bad destination (not writeable): " + dest);
        }
    }

    while (!events.isEmpty()) {
        listenerForwarder.setTask("Generating the output");

        final EventObject event = events.remove();
        if (event == null) {
            // TODO LOG
            continue;
        }
        if (event instanceof FileSystemEvent) {
            File source = ((FileSystemEvent) event).getSource();

            try {
                listenerForwarder.setTask("Extracting file: " + source);
                final File extracted = Extract.extract(source, getTempDir(), false);
                if (extracted != null) {
                    if (dest != null) {
                        File newDest = new File(dest, extracted.getName());
                        listenerForwarder.setTask("moving \'" + extracted + "\' to \'" + newDest + "\'");
                        FileUtils.moveDirectoryToDirectory(extracted, newDest, true);
                        listenerForwarder.terminated();
                        ret.add(new FileSystemEvent(newDest, FileSystemEventType.DIR_CREATED));
                    } else {
                        throw new ActionException(this, "Unable to extracto file: " + source);
                    }
                } else {
                    final String message = "Unable to extract " + source;
                    if (!getConfiguration().isFailIgnored()) {
                        ActionException ex = new ActionException(this.getClass(), message);
                        listenerForwarder.failed(ex);
                        throw ex;
                    } else {
                        LOGGER.warn(message);
                    }
                }
            } catch (Exception e) {
                final String message = "Unable to copy extracted archive";
                if (!getConfiguration().isFailIgnored()) {
                    ActionException ex = new ActionException(this.getClass(), message);
                    listenerForwarder.failed(ex);
                    throw ex;
                } else {
                    LOGGER.warn(e.getLocalizedMessage());
                }

            }
        } else {
            final String message = "Incoming instance is not a FileSystemEvent: " + event;
            if (!getConfiguration().isFailIgnored()) {
                ActionException ex = new ActionException(this.getClass(), message);
                listenerForwarder.failed(ex);
                throw ex;
            } else {
                LOGGER.warn(message);
            }
        }
        // TODO setup task progress
    } // end while

    listenerForwarder.completed();
    return ret;
}

From source file:org.apache.gobblin.ingestion.google.webmaster.GoogleWebmasterDataFetcherImpl.java

/**
 * Gets all pages in async mode.
 */
private Collection<String> getPages(String startDate, String endDate, List<Dimension> dimensions,
        ApiDimensionFilter countryFilter, Queue<Pair<String, FilterOperator>> toProcess, int rowLimit)
        throws IOException {
    String country = GoogleWebmasterFilter.countryFilterToString(countryFilter);

    ConcurrentLinkedDeque<String> allPages = new ConcurrentLinkedDeque<>();
    int r = 0;
    while (r <= GET_PAGES_RETRIES) {
        ++r;
        log.info(String.format("Get pages at round %d with size %d.", r, toProcess.size()));
        ConcurrentLinkedDeque<Pair<String, FilterOperator>> nextRound = new ConcurrentLinkedDeque<>();
        ExecutorService es = Executors.newFixedThreadPool(10, ExecutorsUtils
                .newDaemonThreadFactory(Optional.of(log), Optional.of(this.getClass().getSimpleName())));

        while (!toProcess.isEmpty()) {
            submitJob(toProcess.poll(), countryFilter, startDate, endDate, dimensions, es, allPages, nextRound,
                    rowLimit);
        }
        //wait for jobs to finish and start next round if necessary.
        try {
            es.shutdown();
            boolean terminated = es.awaitTermination(5, TimeUnit.MINUTES);
            if (!terminated) {
                es.shutdownNow();
                log.warn(String.format(
                        "Timed out while getting all pages for country-%s at round %d. Next round now has size %d.",
                        country, r, nextRound.size()));
            }
        } catch (InterruptedException e) {
            throw new RuntimeException(e);
        }

        if (nextRound.isEmpty()) {
            break;
        }
        toProcess = nextRound;
    }
    if (r == GET_PAGES_RETRIES) {
        throw new RuntimeException(String.format(
                "Getting all pages reaches the maximum number of retires %d. Date range: %s ~ %s. Country: %s.",
                GET_PAGES_RETRIES, startDate, endDate, country));
    }
    return allPages;
}

From source file:com.linkedin.pinot.core.startree.OffHeapStarTreeBuilder.java

/**
 * Helper method that visits each leaf node and does the following:
 * - Re-orders the doc-ids corresponding to the leaf node with respect to the time column.
 * - Creates children nodes for each time value under this leaf node.
 * - Adds a new record with aggregated data for this leaf node.
 * @throws Exception
 */
private void splitLeafNodesOnTimeColumn() throws Exception {
    Queue<StarTreeIndexNode> nodes = new LinkedList<>();
    nodes.add(starTreeRootIndexNode);
    StarTreeDataSorter dataSorter = new StarTreeDataSorter(dataFile, dimensionSizeBytes, metricSizeBytes);
    while (!nodes.isEmpty()) {
        StarTreeIndexNode node = nodes.remove();
        if (node.isLeaf()) {
            // If we have time column, split on time column, helps in time based filtering
            if (timeColumnName != null) {
                int level = node.getLevel();
                int[] newSortOrder = moveColumnInSortOrder(timeColumnName, getSortOrder(), level);

                int startDocId = node.getStartDocumentId();
                int endDocId = node.getEndDocumentId();
                dataSorter.sort(startDocId, endDocId, newSortOrder);
                int timeColIndex = dimensionNameToIndexMap.get(timeColumnName);
                Map<Integer, IntPair> timeColumnRangeMap = dataSorter.groupByIntColumnCount(startDocId,
                        endDocId, timeColIndex);

                node.setChildDimensionName(timeColIndex);
                node.setChildren(new HashMap<Integer, StarTreeIndexNode>());

                for (int timeValue : timeColumnRangeMap.keySet()) {
                    IntPair range = timeColumnRangeMap.get(timeValue);
                    StarTreeIndexNode child = new StarTreeIndexNode();
                    child.setDimensionName(timeColIndex);
                    child.setDimensionValue(timeValue);
                    child.setParent(node);
                    child.setLevel(node.getLevel() + 1);
                    child.setStartDocumentId(range.getLeft());
                    child.setEndDocumentId(range.getRight());
                    node.addChild(child, timeValue);
                }
            }
        } else {
            Iterator<StarTreeIndexNode> childrenIterator = node.getChildrenIterator();
            while (childrenIterator.hasNext()) {
                nodes.add(childrenIterator.next());
            }
        }
    }
    dataSorter.close();
}

From source file:bwem.map.MapImpl.java

public TilePosition breadthFirstSearch(TilePosition start, Pred findCond, Pred visitCond, boolean connect8) {
    if (findCond.isTrue(getData().getTile(start), start, this)) {
        return start;
    }

    final Set<TilePosition> visited = new TreeSet<>((a, b) -> {
        int result = Integer.compare(a.getX(), b.getX());
        if (result != 0) {
            return result;
        }
        return Integer.compare(a.getY(), b.getY());
    });
    Queue<TilePosition> toVisit = new ArrayDeque<>();

    toVisit.add(start);
    visited.add(start);

    TilePosition[] dir8 = { new TilePosition(-1, -1), new TilePosition(0, -1), new TilePosition(1, -1),
            new TilePosition(-1, 0), new TilePosition(1, 0), new TilePosition(-1, 1), new TilePosition(0, 1),
            new TilePosition(1, 1) };
    TilePosition[] dir4 = { new TilePosition(0, -1), new TilePosition(-1, 0), new TilePosition(+1, 0),
            new TilePosition(0, +1) };
    TilePosition[] directions = connect8 ? dir8 : dir4;

    while (!toVisit.isEmpty()) {
        TilePosition current = toVisit.remove();
        for (TilePosition delta : directions) {
            TilePosition next = current.add(delta);
            if (getData().getMapData().isValid(next)) {
                Tile nextTile = getData().getTile(next, CheckMode.NO_CHECK);
                if (findCond.isTrue(nextTile, next, this)) {
                    return next;
                }
                if (visitCond.isTrue(nextTile, next, this) && !visited.contains(next)) {
                    toVisit.add(next);
                    visited.add(next);
                }
            }
        }
    }

    //TODO: Are we supposed to return start or not?
    //        bwem_assert(false);
    throw new IllegalStateException();
    //        return start;
}

From source file:it.geosolutions.geobatch.unredd.script.publish.PublishingAction.java

/**
 * Main loop over the input files. Single-file processing is performed by
 * executeInternal(File xmlFile).
 */
public Queue<FileSystemEvent> execute(Queue<FileSystemEvent> events) throws ActionException {

    // ****************************************
    // initialize PostGISUtils, Geostore and paths
    //
    // ****************************************

    try {
        initialize();
    } catch (Exception e) {
        LOGGER.error("Exception during component initialization", e);
        throw new ActionException(this, "Exception during initialization");
    }

    final Queue<FileSystemEvent> ret = new LinkedList<FileSystemEvent>();
    while (!events.isEmpty()) {
        final FileSystemEvent ev = events.remove();

        try {
            if (ev != null) {
                if (LOGGER.isTraceEnabled()) {
                    LOGGER.trace("PublishingAction.execute(): working on incoming event: " + ev.getSource());
                }

                File xmlFile = ev.getSource(); // this is the input xml file
                executeInternal(xmlFile);
                ret.add(new FileSystemEvent(xmlFile, FileSystemEventType.FILE_ADDED));

            } else {
                LOGGER.error("PublishingAction.execute(): Encountered a NULL event: SKIPPING...");
                continue;
            }

        } catch (ActionException ex) { // ActionEx have already been processed
            LOGGER.error(ex.getMessage(), ex);
            throw ex;

        } catch (Exception ex) {
            final String message = "PublishingAction.execute(): Unable to produce the output: "
                    + ex.getLocalizedMessage();
            LOGGER.error(message, ex);
            throw new ActionException(this, message);
        }
    }

    return ret;
}

From source file:org.exoplatform.ecm.webui.component.explorer.rightclick.manager.DeleteManageComponent.java

private void processRemoveOrMoveToTrash(String nodePath, Node node, Event<?> event, boolean isMultiSelect,
        boolean checkToMoveToTrash) throws Exception {
    if (!checkToMoveToTrash || Utils.isInTrash(node))
        processRemoveNode(nodePath, node, event, isMultiSelect);
    else {
        WCMComposer wcmComposer = WCMCoreUtils.getService(WCMComposer.class);
        List<Node> categories = WCMCoreUtils.getService(TaxonomyService.class).getAllCategories(node);

        String parentPath = node.getParent().getPath();
        String parentWSpace = node.getSession().getWorkspace().getName();

        wcmComposer.updateContent(parentWSpace, node.getPath(), new HashMap<String, String>());
        boolean isNodeReferenceable = Utils.isReferenceable(node);
        String nodeUUID = null;
        if (isNodeReferenceable)
            nodeUUID = node.getUUID();
        boolean moveOK = moveToTrash(nodePath, node, event, isMultiSelect);
        if (moveOK) {
            for (Node categoryNode : categories) {
                wcmComposer.updateContents(categoryNode.getSession().getWorkspace().getName(),
                        categoryNode.getPath(), new HashMap<String, String>());
            }
            PortletRequestContext pcontext = (PortletRequestContext) WebuiRequestContext.getCurrentInstance();

            PortletPreferences portletPref = pcontext.getRequest().getPreferences();

            String trashWorkspace = portletPref.getValue(Utils.TRASH_WORKSPACE, "");
            if (isNodeReferenceable) {
                wcmComposer.updateContent(trashWorkspace, nodeUUID, new HashMap<String, String>());
            }
            wcmComposer.updateContents(parentWSpace, parentPath, new HashMap<String, String>());
            //Broadcast the event when user move node to Trash
            ListenerService listenerService = WCMCoreUtils.getService(ListenerService.class);
            ActivityCommonService activityService = WCMCoreUtils.getService(ActivityCommonService.class);
            Node parent = node.getParent();
            if (node.getPrimaryNodeType().getName().equals(NodetypeConstant.NT_FILE)) {
                if (activityService.isBroadcastNTFileEvents(node)) {
                    listenerService.broadcast(ActivityCommonService.FILE_REMOVE_ACTIVITY, parent, node);
                }
            } else if (!isDocumentNodeType(node)) {
                Queue<Node> queue = new LinkedList<Node>();
                queue.add(node);

                //Broadcast event to remove file activities
                Node tempNode = null;
                try {
                    while (!queue.isEmpty()) {
                        tempNode = queue.poll();
                        if (isDocumentNodeType(tempNode)
                                || tempNode.getPrimaryNodeType().getName().equals(NodetypeConstant.NT_FILE)) {
                            listenerService.broadcast(ActivityCommonService.FILE_REMOVE_ACTIVITY,
                                    tempNode.getParent(), tempNode);
                        } else {
                            for (NodeIterator iter = tempNode.getNodes(); iter.hasNext();) {
                                Node childNode = iter.nextNode();
                                if (isDocumentNodeType(childNode)
                                        || childNode.isNodeType(NodetypeConstant.NT_UNSTRUCTURED)
                                        || childNode.isNodeType(NodetypeConstant.NT_FOLDER))
                                    queue.add(childNode);
                            }
                        }
                    }
                } catch (Exception e) {
                    if (LOG.isWarnEnabled()) {
                        LOG.warn(e.getMessage());
                    }
                }

            }
        }
    }
}