Example usage for java.util.Queue.size()

List of usage examples for java.util.Queue.size()

Introduction

On this page you can find usage examples for java.util.Queue.size().

Prototype

int size();

Document

Returns the number of elements in this collection.
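
Before the project examples below, here is a minimal, self-contained sketch (not taken from any of the listed sources) illustrating what size() reports as elements are added and removed:

import java.util.LinkedList;
import java.util.Queue;

public class QueueSizeExample {
    public static void main(String[] args) {
        Queue<String> queue = new LinkedList<>(); // LinkedList implements Queue
        queue.add("first");
        queue.add("second");

        // size() returns the number of elements currently held by the queue
        System.out.println(queue.size()); // prints 2

        queue.poll(); // removes the head element ("first")
        System.out.println(queue.size()); // prints 1
    }
}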

Usage

From source file: org.glassfish.jersey.examples.sseitemstore.jersey.JerseyItemStoreResourceTest.java

/**
 * Test the item addition, addition event broadcasting and item retrieval from {@link ItemStoreResource}.
 *
 * @throws Exception in case of a test failure.
 */
@Test
public void testItemsStore() throws Exception {
    final List<String> items = Collections.unmodifiableList(Arrays.asList("foo", "bar", "baz"));
    final WebTarget itemsTarget = target("items");
    final CountDownLatch latch = new CountDownLatch(items.size() * MAX_LISTENERS * 2); // countdown on all events
    final List<Queue<Integer>> indexQueues = new ArrayList<>(MAX_LISTENERS);
    final EventSource[] sources = new EventSource[MAX_LISTENERS];
    final AtomicInteger sizeEventsCount = new AtomicInteger(0);

    for (int i = 0; i < MAX_LISTENERS; i++) {
        final int id = i;
        final EventSource es = EventSource.target(itemsTarget.path("events")).named("SOURCE " + id).build();
        sources[id] = es;

        final Queue<Integer> indexes = new ConcurrentLinkedQueue<>();
        indexQueues.add(indexes);

        es.register(inboundEvent -> {
            try {
                if (inboundEvent.getName() == null) {
                    final String data = inboundEvent.readData();
                    LOGGER.info("[-i-] SOURCE " + id + ": Received event id=" + inboundEvent.getId() + " data="
                            + data);
                    indexes.add(items.indexOf(data));
                } else if ("size".equals(inboundEvent.getName())) {
                    sizeEventsCount.incrementAndGet();
                }
            } catch (Exception ex) {
                LOGGER.log(Level.SEVERE, "[-x-] SOURCE " + id + ": Error getting event data.", ex);
                indexes.add(-999);
            } finally {
                latch.countDown();
            }
        });
    }

    try {
        open(sources);

        for (String item : items) {
            postItem(itemsTarget, item);
        }

        assertTrue("Waiting to receive all events has timed out.",
                latch.await(
                        (1000 + MAX_LISTENERS * EventSource.RECONNECT_DEFAULT) * getAsyncTimeoutMultiplier(),
                        TimeUnit.MILLISECONDS));

        // need to force disconnect on server in order for EventSource.close(...) to succeed with HttpUrlConnection
        sendCommand(itemsTarget, "disconnect");
    } finally {
        close(sources);
    }

    String postedItems = itemsTarget.request().get(String.class);
    for (String item : items) {
        assertTrue("Item '" + item + "' not stored on server.", postedItems.contains(item));
    }

    int queueId = 0;
    for (Queue<Integer> indexes : indexQueues) {
        for (int i = 0; i < items.size(); i++) {
            assertTrue("Event for '" + items.get(i) + "' not received in queue " + queueId,
                    indexes.contains(i));
        }
        assertEquals("Not received the expected number of events in queue " + queueId, items.size(),
                indexes.size());
        queueId++;
    }

    assertEquals("Number of received 'size' events does not match.", items.size() * MAX_LISTENERS,
            sizeEventsCount.get());
}

From source file: de.tudarmstadt.ukp.dkpro.lexsemresource.graph.EntityGraphJGraphT.java

/**
 * Creates the hyponym map, that maps from nodes to their (recursive) number of hyponyms for
 * each node. "recursive" means that the hyponyms of hyponyms are also taken into account.
 *
 * @throws UnsupportedOperationException
 * @throws LexicalSemanticResourceException
 */
private Map<String, Integer> getHyponymCountMap() throws LexicalSemanticResourceException {
    // do only create hyponymMap, if it was not already computed
    if (hyponymCountMap != null) {
        return hyponymCountMap;
    }

    // work on the lcc, otherwise this is not going to work
    // EntityGraphJGraphT lcc = this;
    EntityGraphJGraphT lcc = this.getLargestConnectedComponent();
    lcc.removeCycles();
    int nrOfNodes = lcc.getNumberOfNodes();

    File hyponymCountMapSerializedFile = new File(
            getGraphId() + "_" + hyponymCountMapFilename + (lexSemRes.getIsCaseSensitive() ? "-cs" : "-cis"));
    hyponymCountMap = new HashMap<String, Integer>();

    if (hyponymCountMapSerializedFile.exists()) {
        logger.info("Loading saved hyponymyCountMap ...");
        hyponymCountMap = EntityGraphUtils.deserializeMap(hyponymCountMapSerializedFile);
        if (hyponymCountMap.size() != nrOfNodes) {
            throw new LexicalSemanticResourceException(
                    "HyponymCountMap does not contain an entry for each node in the graph."
                            + hyponymCountMap.size() + "/" + nrOfNodes);
        }
        logger.info("Done loading saved hyponymyCountMap");
        return hyponymCountMap;
    }

    hyponymCountMap = new HashMap<String, Integer>();

    // a queue holding the nodes to process
    Queue<String> queue = new LinkedList<String>();

    // In the entity graph a node may have more than one father.
    // Thus, we check whether a node was already visited.
    // Then, it is not expanded again.
    Set<String> visited = new HashSet<String>();

    // initialize the queue with all leaf nodes
    Set<String> leafNodes = new HashSet<String>();
    for (Entity leaf : lcc.getLeaves()) {
        leafNodes.add(leaf.getId());
    }
    queue.addAll(leafNodes);

    logger.info(leafNodes.size() + " leaf nodes.");

    ProgressMeter progress = new ProgressMeter(getNumberOfNodes());
    // while the queue is not empty
    while (!queue.isEmpty()) {
        // remove first element from queue
        String currNodeId = queue.poll();
        Entity currNode = lexSemRes.getEntityById(currNodeId);

        // in some rare cases, getEntityById might fail - so better check for nulls and fail
        // gracefully
        if (currNode == null) {
            visited.add(currNodeId);
            hyponymCountMap.put(currNodeId, 0);
        }

        logger.debug(queue.size());

        if (visited.contains(currNodeId)) {
            continue;
        }

        progress.next();

        if (logger.isDebugEnabled()) {
            logger.debug(progress + " - " + queue.size() + " left in queue");
        } else if (logger.isInfoEnabled() && (progress.getCount() % 100 == 0)) {
            logger.info(progress + " - " + queue.size() + " left in queue");
        }

        Set<Entity> children = lcc.getChildren(currNode);
        Set<String> invalidChildIds = new HashSet<String>();
        int validChildren = 0;
        int sumChildHyponyms = 0;
        boolean invalid = false;
        for (Entity child : children) {
            if (lcc.containsVertex(child)) {
                if (hyponymCountMap.containsKey(child.getId())) {
                    sumChildHyponyms += hyponymCountMap.get(child.getId());
                    validChildren++;
                } else {
                    invalid = true;
                    invalidChildIds.add(child.getId());
                }
            }
        }

        // we cannot use continue directly if invalid as this would continue the inner loop not
        // the outer loop
        if (invalid) {
            // One of the childs is not in the hyponymCountMap yet
            // Re-Enter the node into the queue and continue with next node
            // Also enter all the childs that are not in the queue yet
            queue.add(currNodeId);
            for (String childId : invalidChildIds) {
                if (!visited.contains(childId) && !queue.contains(childId)) {
                    queue.add(childId);
                }
            }
            continue;
        }

        // mark as visited
        visited.add(currNodeId);

        // number of hyponomys of current node is the number of its own hyponyms and the sum of
        // the hyponyms of its children.
        int currNodeHyponomyCount = validChildren + sumChildHyponyms;
        hyponymCountMap.put(currNodeId, currNodeHyponomyCount);

        // add parents of current node to queue
        for (Entity parent : lcc.getParents(currNode)) {
            if (lcc.containsVertex(parent)) {
                queue.add(parent.getId());
            }
        }
    } // while queue not empty

    logger.info(visited.size() + " nodes visited");
    if (visited.size() != nrOfNodes) {
        List<Entity> missed = new ArrayList<Entity>();
        for (Entity e : lcc.getNodes()) {
            if (!visited.contains(e.getId())) {
                missed.add(e);
                System.out.println("Missed: [" + e + "]");
            }
        }

        throw new LexicalSemanticResourceException(
                "Visited only " + visited.size() + " out of " + nrOfNodes + " nodes.");
    }
    if (hyponymCountMap.size() != nrOfNodes) {
        throw new LexicalSemanticResourceException(
                "HyponymCountMap does not contain an entry for each node in the graph." + hyponymCountMap.size()
                        + "/" + nrOfNodes);
    }

    /*
     * As an EntityGraph is a graph rather than a tree, the hyponymCount for top nodes can be
     * greater than the number of nodes in the graph. This is due to the multiple counting of nodes
     * having more than one parent. Thus, we have to scale hyponym counts to fall in
     * [0,NumberOfNodes].
     */
    for (String key : hyponymCountMap.keySet()) {
        if (hyponymCountMap.get(key) > hyponymCountMap.size()) {
            // TODO scaling function is not optimal (to say the least :)
            hyponymCountMap.put(key, (hyponymCountMap.size() - 1));
        }
    }

    logger.info("Computed hyponymCountMap");
    EntityGraphUtils.serializeMap(hyponymCountMap, hyponymCountMapSerializedFile);
    logger.info("Serialized hyponymCountMap");

    return hyponymCountMap;
}

From source file: org.glassfish.jersey.examples.sseitemstore.ItemStoreResourceTest.java

/**
 * Test the item addition, addition event broadcasting and item retrieval from {@link ItemStoreResource}.
 *
 * @throws Exception in case of a test failure.
 */
@Test
public void testItemsStore() throws Exception {
    final List<String> items = Collections.unmodifiableList(Arrays.asList("foo", "bar", "baz"));
    final WebTarget itemsTarget = target("items");
    final CountDownLatch latch = new CountDownLatch(items.size() * MAX_LISTENERS * 2); // countdown on all events
    final List<Queue<Integer>> indexQueues = new ArrayList<Queue<Integer>>(MAX_LISTENERS);
    final EventSource[] sources = new EventSource[MAX_LISTENERS];
    final AtomicInteger sizeEventsCount = new AtomicInteger(0);

    for (int i = 0; i < MAX_LISTENERS; i++) {
        final int id = i;
        final EventSource es = EventSource.target(itemsTarget.path("events")).named("SOURCE " + id).build();
        sources[id] = es;

        final Queue<Integer> indexes = new ConcurrentLinkedQueue<Integer>();
        indexQueues.add(indexes);

        es.register(new EventListener() {
            @SuppressWarnings("MagicNumber")
            @Override
            public void onEvent(InboundEvent inboundEvent) {
                try {
                    if (inboundEvent.getName() == null) {
                        final String data = inboundEvent.readData();
                        LOGGER.info("[-i-] SOURCE " + id + ": Received event id=" + inboundEvent.getId()
                                + " data=" + data);
                        indexes.add(items.indexOf(data));
                    } else if ("size".equals(inboundEvent.getName())) {
                        sizeEventsCount.incrementAndGet();
                    }
                } catch (Exception ex) {
                    LOGGER.log(Level.SEVERE, "[-x-] SOURCE " + id + ": Error getting event data.", ex);
                    indexes.add(-999);
                } finally {
                    latch.countDown();
                }
            }
        });
    }

    try {
        open(sources);

        for (String item : items) {
            postItem(itemsTarget, item);
        }

        assertTrue("Waiting to receive all events has timed out.",
                latch.await(
                        (1000 + MAX_LISTENERS * EventSource.RECONNECT_DEFAULT) * getAsyncTimeoutMultiplier(),
                        TimeUnit.MILLISECONDS));

        // need to force disconnect on server in order for EventSource.close(...) to succeed with HttpUrlConnection
        sendCommand(itemsTarget, "disconnect");
    } finally {
        close(sources);
    }

    String postedItems = itemsTarget.request().get(String.class);
    for (String item : items) {
        assertTrue("Item '" + item + "' not stored on server.", postedItems.contains(item));
    }

    int queueId = 0;
    for (Queue<Integer> indexes : indexQueues) {
        for (int i = 0; i < items.size(); i++) {
            assertTrue("Event for '" + items.get(i) + "' not received in queue " + queueId,
                    indexes.contains(i));
        }
        assertEquals("Not received the expected number of events in queue " + queueId, items.size(),
                indexes.size());
        queueId++;
    }

    assertEquals("Number of received 'size' events does not match.", items.size() * MAX_LISTENERS,
            sizeEventsCount.get());
}

From source file: org.springframework.data.redis.connection.DefaultStringRedisConnection.java

@SuppressWarnings({ "unchecked", "rawtypes" })
private List<Object> convertResults(List<Object> results, Queue<Converter> converters) {
    if (!deserializePipelineAndTxResults || results == null) {
        return results;
    }
    if (results.size() != converters.size()) {
        // Some of the commands were done directly on the delegate, don't attempt to convert
        log.warn("Delegate returned an unexpected number of results. Abandoning type conversion.");
        return results;
    }
    List<Object> convertedResults = new ArrayList<Object>();
    for (Object result : results) {
        convertedResults.add(converters.remove().convert(result));
    }
    return convertedResults;
}

From source file: de.innovationgate.wgpublisher.lucene.LuceneManager.java

public long getRemainingAdditionRequests() {

    int remaining = 0;
    for (Queue<IndexingRequest> req : _additionRequestsMap.values()) {
        remaining += req.size();
    }
    return remaining;
}

From source file: de.innovationgate.wgpublisher.lucene.LuceneManager.java

public long getRemainingDeletionRequests() {

    int remaining = 0;
    for (Queue<IndexingRequest> req : _deletionRequestsMap.values()) {
        remaining += req.size();
    }
    return remaining;
}

From source file: edu.emory.cci.aiw.umls.UMLSDatabaseConnection.java

@Override
public int getDistBF(ConceptUID cui1, ConceptUID cui2, String rela, SAB sab, int maxR)
        throws UMLSQueryException {
    Queue<ConceptUID> cuiQue = new LinkedList<ConceptUID>();
    Set<ConceptUID> visited = new HashSet<ConceptUID>();
    Map<Integer, Integer> radiusIdx = new HashMap<Integer, Integer>();
    int queIdx = 0;
    int r = 0;
    radiusIdx.put(r, 0);

    if (maxR <= 0) {
        maxR = 3;
    }

    try {
        setupConn();
        cuiQue.add(cui1);
        visited.add(cui1);

        List<UMLSQuerySearchUID> params = new ArrayList<UMLSQuerySearchUID>();
        StringBuilder sql = new StringBuilder(
                "select distinct(CUI2) from MRREL where CUI1 = ? and (rel='PAR' or rel='CHD')");
        params.add(ConceptUID.EMPTY_CUI);
        if (sab != null) {
            sql.append(" and SAB = ?");
            params.add(sab);
        }
        if (rela != null && !rela.equals("")) {
            sql.append(" and RELA = ?");
            params.add(UMLSQueryStringValue.fromString(rela));
        }

        while (!cuiQue.isEmpty()) {
            ConceptUID node = cuiQue.remove();
            params.set(0, node);
            if (node.equals(cui2)) {
                return r;
            }

            List<ConceptUID> adjNodes = new ArrayList<ConceptUID>();

            ResultSet rs = executeAndLogQuery(substParams(sql.toString(), params));
            while (rs.next()) {
                ConceptUID c2 = ConceptUID.fromString(rs.getString(1));
                if (!visited.contains(c2)) {
                    adjNodes.add(c2);
                }
            }

            if (!radiusIdx.containsKey(r + 1)) {
                radiusIdx.put(r + 1, queIdx + cuiQue.size());
            }
            radiusIdx.put(r + 1, adjNodes.size());

            if (queIdx == radiusIdx.get(r)) {
                r++;
            }
            queIdx++;

            for (ConceptUID c : adjNodes) {
                visited.add(c);
                cuiQue.add(c);
            }
            if (r > maxR) {
                return r;
            }
        }
    } catch (SQLException sqle) {
        throw new UMLSQueryException(sqle);
    } catch (MalformedUMLSUniqueIdentifierException muuie) {
        throw new UMLSQueryException(muuie);
    } finally {
        tearDownConn();
    }

    log(Level.FINEST, "Returning -1");
    return -1;
}

From source file: com.clxcommunications.xms.ApiConnectionIT.java

/**
 * Verifies that the default HTTP client actually can handle multiple
 * simultaneous requests.
 * 
 * @throws Exception
 *             shouldn't happen
 */
@Test
public void canCancelBatchConcurrently() throws Exception {
    String spid = TestUtils.freshServicePlanId();

    // Set up the first request (the one that will be delayed).
    MtBatchSmsResult expected1 = MtBatchTextSmsResult.builder().sender("12345")
            .addRecipient("123456789", "987654321").body("Hello, world!").canceled(true)
            .id(TestUtils.freshBatchId()).createdAt(OffsetDateTime.now()).modifiedAt(OffsetDateTime.now())
            .build();

    String path1 = "/v1/" + spid + "/batches/" + expected1.id();
    byte[] response1 = json.writeValueAsBytes(expected1);

    wm.stubFor(delete(urlEqualTo(path1)).willReturn(aResponse().withFixedDelay(500) // Delay for a while.
            .withStatus(200).withHeader("Content-Type", "application/json; charset=UTF-8")
            .withBody(response1)));

    // Set up the second request.
    MtBatchSmsResult expected2 = MtBatchBinarySmsResult.builder().sender("12345")
            .addRecipient("123456789", "987654321").body("Hello, world!".getBytes()).udh((byte) 1)
            .canceled(true).id(TestUtils.freshBatchId()).createdAt(OffsetDateTime.now())
            .modifiedAt(OffsetDateTime.now()).build();

    String path2 = "/v1/" + spid + "/batches/" + expected2.id();

    stubDeleteResponse(expected2, path2);

    ApiConnection conn = ApiConnection.builder().servicePlanId(spid).token("tok")
            .endpoint("http://localhost:" + wm.port()).start();

    try {
        final Queue<MtBatchSmsResult> results = new ConcurrentArrayQueue<MtBatchSmsResult>();
        final CountDownLatch latch = new CountDownLatch(2);

        FutureCallback<MtBatchSmsResult> callback = new TestCallback<MtBatchSmsResult>() {

            @Override
            public void completed(MtBatchSmsResult result) {
                results.add(result);
                latch.countDown();
            }

        };

        conn.cancelBatchAsync(expected1.id(), callback);
        Thread.sleep(100);
        conn.cancelBatchAsync(expected2.id(), callback);

        // Wait for callback to be called.
        latch.await();

        // We expect the second message to be handled first.
        assertThat(results.size(), is(2));
        assertThat(results.poll(), is(expected2));
        assertThat(results.poll(), is(expected1));
    } finally {
        conn.close();
    }

    verifyDeleteRequest(path1);
    verifyDeleteRequest(path2);
}

From source file: edu.uci.ics.hyracks.api.rewriter.ActivityClusterGraphRewriter.java

/**
 * rewrite an activity cluster internally
 *
 * @param ac
 *            the activity cluster to be rewritten
 */
private void rewriteIntraActivityCluster(ActivityCluster ac,
        Map<IActivity, SuperActivity> invertedActivitySuperActivityMap) {
    Map<ActivityId, IActivity> activities = ac.getActivityMap();
    Map<ActivityId, List<IConnectorDescriptor>> activityInputMap = ac.getActivityInputMap();
    Map<ActivityId, List<IConnectorDescriptor>> activityOutputMap = ac.getActivityOutputMap();
    Map<ConnectorDescriptorId, Pair<Pair<IActivity, Integer>, Pair<IActivity, Integer>>> connectorActivityMap = ac
            .getConnectorActivityMap();
    ActivityClusterGraph acg = ac.getActivityClusterGraph();
    Map<ActivityId, IActivity> startActivities = new HashMap<ActivityId, IActivity>();
    Map<ActivityId, SuperActivity> superActivities = new HashMap<ActivityId, SuperActivity>();
    Map<ActivityId, Queue<IActivity>> toBeExpendedMap = new HashMap<ActivityId, Queue<IActivity>>();

    /**
     * Build the initial super activities
     */
    for (Entry<ActivityId, IActivity> entry : activities.entrySet()) {
        ActivityId activityId = entry.getKey();
        IActivity activity = entry.getValue();
        if (activityInputMap.get(activityId) == null) {
            startActivities.put(activityId, activity);
            /**
             * use the start activity's id as the id of the super activity
             */
            createNewSuperActivity(ac, superActivities, toBeExpendedMap, invertedActivitySuperActivityMap,
                    activityId, activity);
        }
    }

    /**
     * expand one-to-one connected activity cluster by the BFS order.
     * after the while-loop, the original activities are partitioned
     * into equivalent classes, one-per-super-activity.
     */
    Map<ActivityId, SuperActivity> clonedSuperActivities = new HashMap<ActivityId, SuperActivity>();
    while (toBeExpendedMap.size() > 0) {
        clonedSuperActivities.clear();
        clonedSuperActivities.putAll(superActivities);
        for (Entry<ActivityId, SuperActivity> entry : clonedSuperActivities.entrySet()) {
            ActivityId superActivityId = entry.getKey();
            SuperActivity superActivity = entry.getValue();

            /**
             * for the case where the super activity has already been swallowed
             */
            if (superActivities.get(superActivityId) == null) {
                continue;
            }

            /**
             * expend the super activity
             */
            Queue<IActivity> toBeExpended = toBeExpendedMap.get(superActivityId);
            if (toBeExpended == null) {
                /**
                 * Nothing to expand
                 */
                continue;
            }
            IActivity expendingActivity = toBeExpended.poll();
            List<IConnectorDescriptor> outputConnectors = activityOutputMap
                    .get(expendingActivity.getActivityId());
            if (outputConnectors != null) {
                for (IConnectorDescriptor outputConn : outputConnectors) {
                    Pair<Pair<IActivity, Integer>, Pair<IActivity, Integer>> endPoints = connectorActivityMap
                            .get(outputConn.getConnectorId());
                    IActivity newActivity = endPoints.getRight().getLeft();
                    SuperActivity existingSuperActivity = invertedActivitySuperActivityMap.get(newActivity);
                    if (outputConn.getClass().getName().contains(ONE_TO_ONE_CONNECTOR)) {
                        /**
                         * expend the super activity cluster on an one-to-one out-bound connection
                         */
                        if (existingSuperActivity == null) {
                            superActivity.addActivity(newActivity);
                            toBeExpended.add(newActivity);
                            invertedActivitySuperActivityMap.put(newActivity, superActivity);
                        } else {
                            /**
                             * the two activities already in the same super activity
                             */
                            if (existingSuperActivity == superActivity) {
                                continue;
                            }
                            /**
                             * swallow an existing super activity
                             */
                            swallowExistingSuperActivity(superActivities, toBeExpendedMap,
                                    invertedActivitySuperActivityMap, superActivity, superActivityId,
                                    existingSuperActivity);
                        }
                    } else {
                        if (existingSuperActivity == null) {
                            /**
                             * create new activity
                             */
                            createNewSuperActivity(ac, superActivities, toBeExpendedMap,
                                    invertedActivitySuperActivityMap, newActivity.getActivityId(), newActivity);
                        }
                    }
                }
            }

            /**
             * remove the to-be-expended queue if it is empty
             */
            if (toBeExpended.size() == 0) {
                toBeExpendedMap.remove(superActivityId);
            }
        }
    }

    Map<ConnectorDescriptorId, IConnectorDescriptor> connMap = ac.getConnectorMap();
    Map<ConnectorDescriptorId, RecordDescriptor> connRecordDesc = ac.getConnectorRecordDescriptorMap();
    Map<SuperActivity, Integer> superActivityProducerPort = new HashMap<SuperActivity, Integer>();
    Map<SuperActivity, Integer> superActivityConsumerPort = new HashMap<SuperActivity, Integer>();
    for (Entry<ActivityId, SuperActivity> entry : superActivities.entrySet()) {
        superActivityProducerPort.put(entry.getValue(), 0);
        superActivityConsumerPort.put(entry.getValue(), 0);
    }

    /**
     * create a new activity cluster to replace the old activity cluster
     */
    ActivityCluster newActivityCluster = new ActivityCluster(acg, ac.getId());
    newActivityCluster.setConnectorPolicyAssignmentPolicy(ac.getConnectorPolicyAssignmentPolicy());
    for (Entry<ActivityId, SuperActivity> entry : superActivities.entrySet()) {
        newActivityCluster.addActivity(entry.getValue());
        acg.getActivityMap().put(entry.getKey(), newActivityCluster);
    }

    /**
     * Setup connectors: either inside a super activity or among super activities
     */
    for (Entry<ConnectorDescriptorId, Pair<Pair<IActivity, Integer>, Pair<IActivity, Integer>>> entry : connectorActivityMap
            .entrySet()) {
        ConnectorDescriptorId connectorId = entry.getKey();
        Pair<Pair<IActivity, Integer>, Pair<IActivity, Integer>> endPoints = entry.getValue();
        IActivity producerActivity = endPoints.getLeft().getLeft();
        IActivity consumerActivity = endPoints.getRight().getLeft();
        int producerPort = endPoints.getLeft().getRight();
        int consumerPort = endPoints.getRight().getRight();
        RecordDescriptor recordDescriptor = connRecordDesc.get(connectorId);
        IConnectorDescriptor conn = connMap.get(connectorId);
        if (conn.getClass().getName().contains(ONE_TO_ONE_CONNECTOR)) {
            /**
             * connection edge between inner activities
             */
            SuperActivity residingSuperActivity = invertedActivitySuperActivityMap.get(producerActivity);
            residingSuperActivity.connect(conn, producerActivity, producerPort, consumerActivity, consumerPort,
                    recordDescriptor);
        } else {
            /**
             * connection edge between super activities
             */
            SuperActivity producerSuperActivity = invertedActivitySuperActivityMap.get(producerActivity);
            SuperActivity consumerSuperActivity = invertedActivitySuperActivityMap.get(consumerActivity);
            int producerSAPort = superActivityProducerPort.get(producerSuperActivity);
            int consumerSAPort = superActivityConsumerPort.get(consumerSuperActivity);
            newActivityCluster.addConnector(conn);
            newActivityCluster.connect(conn, producerSuperActivity, producerSAPort, consumerSuperActivity,
                    consumerSAPort, recordDescriptor);

            /**
             * bridge the port
             */
            producerSuperActivity.setClusterOutputIndex(producerSAPort, producerActivity.getActivityId(),
                    producerPort);
            consumerSuperActivity.setClusterInputIndex(consumerSAPort, consumerActivity.getActivityId(),
                    consumerPort);
            acg.getConnectorMap().put(connectorId, newActivityCluster);

            /**
             * increasing the port number for the producer and consumer
             */
            superActivityProducerPort.put(producerSuperActivity, ++producerSAPort);
            superActivityConsumerPort.put(consumerSuperActivity, ++consumerSAPort);
        }
    }

    /**
     * Set up the roots of the new activity cluster
     */
    for (Entry<ActivityId, SuperActivity> entry : superActivities.entrySet()) {
        List<IConnectorDescriptor> connIds = newActivityCluster.getActivityOutputMap().get(entry.getKey());
        if (connIds == null || connIds.size() == 0) {
            newActivityCluster.addRoot(entry.getValue());
        }
    }

    /**
     * set up the blocked2Blocker mapping, which will be updated in the rewriteInterActivityCluster call
     */
    newActivityCluster.getBlocked2BlockerMap().putAll(ac.getBlocked2BlockerMap());

    /**
     * replace the old activity cluster with the new activity cluster
     */
    acg.getActivityClusterMap().put(ac.getId(), newActivityCluster);
}

From source file: it.geosolutions.geobatch.actions.ds2ds.geoserver.DSGeoServerAction.java

@Override
public Queue<EventObject> execute(Queue<EventObject> events) throws ActionException {

    listenerForwarder.started();

    // return object
    final Queue<EventObject> outputEvents = new LinkedList<EventObject>();

    //check global configurations
    //Geoserver config
    //----------------
    updateTask("Check GeoServer configuration");

    final String url = conf.getGeoserverURL();
    final String user = conf.getGeoserverUID();
    final String password = conf.getGeoserverPWD();
    GeoServerRESTManager gsMan = null;
    try {
        gsMan = new GeoServerRESTManager(new URL(url), user, password);
    } catch (MalformedURLException e) {
        failAction("Wrong GeoServer URL");

    } catch (IllegalArgumentException e) {
        failAction("Unable to create the GeoServer Manager using a null argument");

    }
    //TODO how to check if GS user/password are correct?
    listenerForwarder.progressing(5, "GeoServer configuration checked");

    //Check operation
    //---------------
    updateTask("Check operation");
    String op = conf.getOperation();
    if (op == null || !(op.equalsIgnoreCase("PUBLISH") || op.equalsIgnoreCase("REMOVE"))) {
        failAction("Bad operation: " + op + " in configuration");
    }
    listenerForwarder.progressing(10, "Operation checked");

    //Check WorkSpace
    //---------------
    updateTask("Check workspace configuration");
    String ws = conf.getDefaultNamespace();
    String wsUri = conf.getDefaultNamespaceUri();

    Boolean existWS = false;
    synchronized (existWS) {
        existWS = gsMan.getReader().getWorkspaceNames().contains(ws);

        if (!existWS) {

            boolean createWS = conf.getCreateNameSpace();
            if (createWS) {
                //try to create the workspace
                updateTask("Create workspace " + ws + " in GeoServer");
                boolean created = false;
                if (wsUri == null) {
                    created = gsMan.getPublisher().createWorkspace(ws);
                } else {
                    try {
                        created = gsMan.getPublisher().createWorkspace(ws, new URI(wsUri));
                    } catch (URISyntaxException e) {
                        failAction("Invalid NameSpace URI " + wsUri + " in configuration");
                    }
                }
                if (!created) {
                    failAction("FATAL: unable to create workspace " + ws + " in GeoServer");
                }
            } else {
                failAction("Bad workspace (namespace): " + ws + " in configuration");
            }
        }
    }

    listenerForwarder.progressing(25, "GeoServer workspace checked");

    //event-based business logic
    while (events.size() > 0) {
        final EventObject ev;
        try {
            if ((ev = events.remove()) != null) {

                updateTask("Working on incoming event: " + ev.getSource());

                updateTask("Check acceptable file");
                FileSystemEvent fileEvent = (FileSystemEvent) ev;

                //set FeatureConfiguration
                updateTask("Set Feature Configuration");
                this.createFeatureConfiguration(fileEvent);
                FeatureConfiguration featureConfig = conf.getFeatureConfiguration();

                //TODO check FeatureConfiguration
                updateTask("Check Feature Configuration");
                if (featureConfig.getTypeName() == null) {
                    failAction("feature typeName cannot be null");
                }

                //TODO check if the typeName already exists for the target workspace?

                //datastore check (and eventually creation)
                updateTask("Check datastore configuration");
                String ds = conf.getStoreName();

                Boolean existDS = false;
                synchronized (existDS) {

                    existDS = gsMan.getReader().getDatastores(ws).getNames().contains(ds);
                    if (!existDS) {
                        boolean createDS = conf.getCreateDataStore();
                        if (createDS) {

                            //create datastore
                            updateTask("Create datastore in GeoServer");
                            Map<String, Object> datastore = this.deserialize(featureConfig.getDataStore());

                            String dbType = (String) datastore.get("dbtype");

                            boolean created = false;
                            if (dbType.equalsIgnoreCase("postgis")) {
                                GSPostGISDatastoreEncoder encoder = new GSPostGISDatastoreEncoder(ds);
                                encoder.setName(ds);
                                encoder.setEnabled(true);
                                encoder.setHost((String) datastore.get("host"));
                                encoder.setPort(Integer.parseInt((String) datastore.get("port")));
                                encoder.setDatabase((String) datastore.get("database"));
                                encoder.setSchema((String) datastore.get("schema"));
                                encoder.setUser((String) datastore.get("user"));
                                encoder.setPassword((String) datastore.get("passwd"));

                                created = gsMan.getStoreManager().create(ws, encoder);
                                if (!created) {
                                    failAction("FATAL: unable to create PostGIS datastore " + ds
                                            + " in GeoServer");
                                }

                            } else if (dbType.equalsIgnoreCase("oracle")) {
                                String dbname = (String) datastore.get("database");
                                GSOracleNGDatastoreEncoder encoder = new GSOracleNGDatastoreEncoder(ds, dbname);
                                encoder.setName(ds);
                                encoder.setEnabled(true);
                                encoder.setHost((String) datastore.get("host"));
                                encoder.setPort(Integer.parseInt((String) datastore.get("port")));
                                encoder.setDatabase(dbname);
                                encoder.setSchema((String) datastore.get("schema"));
                                encoder.setUser((String) datastore.get("user"));
                                encoder.setPassword((String) datastore.get("passwd"));

                                created = gsMan.getStoreManager().create(ws, encoder);
                                if (!created) {
                                    failAction("FATAL: unable to create Oracle NG datastore " + ds
                                            + " in GeoServer");
                                }
                            } else {
                                failAction("The datastore type " + dbType + " is not supported");
                            }

                        } else {
                            failAction("Bad datastore:" + ds + " in configuration. Datastore " + ds
                                    + " doesn't exist in workspace (namespace) " + ws);
                        }
                    }
                }
                listenerForwarder.progressing(50, "Check GeoServer datastore");

                //feature type publication/removal
                boolean done = false;
                if (op.equalsIgnoreCase("PUBLISH")) {
                    if (!gsMan.getReader().getLayers().getNames().contains(featureConfig.getTypeName())) {

                        updateTask("Publish DBLayer " + featureConfig.getTypeName() + " in GeoServer");

                        //featuretype
                        final GSFeatureTypeEncoder fte = new GSFeatureTypeEncoder();
                        fte.setName(featureConfig.getTypeName());
                        fte.setTitle(featureConfig.getTypeName());
                        String crs = featureConfig.getCrs();
                        if (crs != null) {
                            fte.setSRS(featureConfig.getCrs());
                        } else {
                            fte.setSRS("EPSG:4326");
                        }
                        fte.setProjectionPolicy(ProjectionPolicy.FORCE_DECLARED);

                        //layer & styles
                        final GSLayerEncoder layerEncoder = new GSLayerEncoder();
                        layerEncoder.setDefaultStyle(this.defineLayerStyle(featureConfig, gsMan)); //default style

                        if (conf.getStyles() != null) {
                            //add available styles
                            for (String style : conf.getStyles()) {
                                layerEncoder.addStyle(style);
                            }
                        }

                        //publish
                        done = gsMan.getPublisher().publishDBLayer(ws, ds, fte, layerEncoder);
                        if (!done) {
                            failAction("Impossible to publish DBLayer " + featureConfig.getTypeName()
                                    + " in GeoServer");
                        }
                    }

                } else if (op.equalsIgnoreCase("REMOVE")) {
                    if (gsMan.getReader().getLayers().getNames().contains(featureConfig.getTypeName())) {

                        //remove
                        updateTask("Remove DBLayer " + featureConfig.getTypeName() + " from GeoServer");

                        done = gsMan.getPublisher().unpublishFeatureType(ws, ds, featureConfig.getTypeName());
                        if (!done) {
                            failAction("Impossible to remove DBLayer " + featureConfig.getTypeName()
                                    + " in GeoServer");
                        }
                    }
                }

                listenerForwarder.progressing(100F, "Successful Geoserver " + op + " operation");
                listenerForwarder.completed();
                outputEvents.add(ev);

            } else {
                if (LOGGER.isErrorEnabled()) {
                    LOGGER.error("Encountered a NULL event: SKIPPING...");
                }
                continue;
            }
        } catch (Exception ioe) {
            failAction("Unable to produce the output: " + ioe.getLocalizedMessage(), ioe);
        }
    }
    return outputEvents;

}