Example usage for java.util.Queue.add

List of usage examples for java.util.Queue.add

Introduction

On this page you can find example usages of java.util.Queue.add.

Prototype

boolean add(E e);

Document

Inserts the specified element into this queue if it is possible to do so immediately without violating capacity restrictions, returning true upon success and throwing an IllegalStateException if no space is currently available.
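
The snippet below is a minimal sketch, not taken from any of the projects listed further down, that illustrates this contract: on an unbounded queue such as LinkedList, add always succeeds and returns true, while on a capacity-restricted queue such as ArrayBlockingQueue it throws IllegalStateException once the queue is full.

import java.util.LinkedList;
import java.util.Queue;
import java.util.concurrent.ArrayBlockingQueue;

public class QueueAddDemo {
    public static void main(String[] args) {
        // Unbounded queue: add always returns true.
        Queue<String> unbounded = new LinkedList<>();
        unbounded.add("first");
        unbounded.add("second");
        System.out.println(unbounded); // prints [first, second]

        // Capacity-restricted queue: add throws once the capacity (here 1) is reached.
        Queue<String> bounded = new ArrayBlockingQueue<>(1);
        bounded.add("only");
        try {
            bounded.add("one too many");
        } catch (IllegalStateException e) {
            System.out.println("Queue full");
        }
    }
}

Use offer instead of add when a full queue should be reported by a false return value rather than an exception.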

Usage

From source file:org.rhq.cassandra.ClusterInitService.java

/**
 * This method attempts to establish a Thrift RPC connection to each host for the
 * number specified. In other words, if there are four hosts and <code>numHosts</code>
 * is 2, this method will block only until it can connect to two of the hosts. If the
 * connection fails, the host is retried after going through the other, remaining
 * hosts.
 * <br/><br/>
 * After connecting to all cluster nodes, this method will sleep for 10 seconds
 * before returning. This is to give the cluster a chance to create the system auth
 * schema and to create the cassandra super user. Cassandra has a hard-coded delay of
 * 10 seconds before it creates the super user, which means the rhq schema cannot be
 * created before that.
 * @param numHosts The number of hosts to which a successful connection has to be made
 *                 before returning.
 * @param delay The amount of time to wait between attempts to make a connection
 * @param retries The number of times to retry connecting. A runtime exception will be
 *                thrown when the number of failed connections exceeds this value.
 * @param initialWait The number of seconds to wait before the first try.
 */
public void waitForClusterToStart(String[] storageNodes, int jmxPorts[], int numHosts, long delay, int retries,
        int initialWait) {
    if (initialWait > 0) {
        if (log.isDebugEnabled()) {
            log.debug("Waiting before JMX calls to the storage nodes for " + initialWait + " seconds...");
        }
        sleep(initialWait * 1000);
    }

    int connections = 0;
    int failedConnections = 0;
    Queue<Integer> queue = new LinkedList<Integer>();
    for (int index = 0; index < storageNodes.length; index++) {
        queue.add(index);
    }

    Integer storageNodeIndex = queue.poll();

    while (storageNodeIndex != null) {
        if (failedConnections >= retries) {
            throw new RuntimeException("Unable to verify that cluster nodes have started after "
                    + failedConnections + " failed attempts");
        }
        try {
            boolean isNativeTransportRunning = isNativeTransportRunning(storageNodes[storageNodeIndex],
                    jmxPorts[storageNodeIndex]);
            if (log.isDebugEnabled() && isNativeTransportRunning) {
                log.debug("Successfully connected to cassandra node [" + storageNodes[storageNodeIndex] + "]");
            }
            if (isNativeTransportRunning) {
                ++connections;
            } else {
                queue.offer(storageNodeIndex);
            }
            if (connections == numHosts) {
                if (log.isDebugEnabled()) {
                    log.debug("Successdully connected to all nodes. Sleeping for 10 seconds to allow for the "
                            + "cassandra superuser set up to complete.");
                }
                sleep(10 * 1000);
                return;
            }
        } catch (Exception e) {
            ++failedConnections;
            queue.offer(storageNodeIndex);
            if (log.isDebugEnabled()) {
                log.debug("Unable to open JMX connection on port [" + jmxPorts[storageNodeIndex]
                        + "] to cassandra node [" + storageNodes[storageNodeIndex] + "].", e);
            } else if (log.isInfoEnabled()) {
                log.debug("Unable to open connection to cassandra node.");
            }
        }
        sleep(delay);
        storageNodeIndex = queue.poll();
    }
}
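
One detail worth noting in the example above: a node that cannot be reached is re-enqueued with offer so it is retried only after the remaining hosts have been tried. For the unbounded LinkedList used here, add and offer behave identically; they differ only on capacity-restricted queues, where add throws IllegalStateException and offer returns false. The sketch below distills the same round-robin retry idea into a standalone, hypothetical helper (tryTask is a placeholder, not part of the original class).

import java.util.LinkedList;
import java.util.Queue;

public class RoundRobinRetry {

    /** Processes tasks in order, re-enqueueing failed ones until they succeed or the failure budget is spent. */
    public static void processAll(String[] tasks, int maxFailures) {
        Queue<Integer> pending = new LinkedList<>();
        for (int i = 0; i < tasks.length; i++) {
            pending.add(i);
        }
        int failures = 0;
        Integer index = pending.poll();
        while (index != null) {
            if (failures >= maxFailures) {
                throw new RuntimeException("Giving up after " + failures + " failed attempts");
            }
            if (!tryTask(tasks[index])) {
                failures++;
                pending.offer(index); // retry later, after the other pending tasks
            }
            index = pending.poll();
        }
    }

    // Placeholder for real work that may fail transiently.
    private static boolean tryTask(String task) {
        return Math.random() > 0.3;
    }
}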

From source file:io.hops.hopsworks.api.zeppelin.socket.NotebookServer.java

private void addUserConnection(String user, Session conn) {
    if (impl.userConnectedSocketsContainsKey(user)) {
        impl.getUserConnectedSocket(user).add(conn);
    } else {
        Queue<Session> socketQueue = new ConcurrentLinkedQueue<>();
        socketQueue.add(conn);
        impl.putUserConnectedSocket(user, socketQueue);
    }
}
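
The check-then-put sequence above is not atomic on its own. Assuming the backing store is a ConcurrentHashMap (an assumption; the impl field is project-specific), the same get-or-create-queue idea can be expressed atomically with computeIfAbsent, as in this hypothetical sketch:

import java.util.Queue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;

class UserConnections<S> {
    private final ConcurrentHashMap<String, Queue<S>> connectedSockets = new ConcurrentHashMap<>();

    // Creates the per-user queue on first use and appends the session in one atomic step.
    void addUserConnection(String user, S session) {
        connectedSockets.computeIfAbsent(user, u -> new ConcurrentLinkedQueue<>()).add(session);
    }
}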

From source file:fr.landel.utils.commons.CastUtilsTest.java

/**
 * Check cast queue
 */
@Test
public void testGetQueue() {

    Queue<String> queue = new LinkedList<>();
    queue.add("value1");
    queue.add(null);
    queue.add("value2");

    assertTrue(CollectionUtils.isEmpty(CastUtils.getLinkedListAsQueue(null, String.class)));
    assertTrue(CollectionUtils.isEmpty(CastUtils.getLinkedTransferQueue(null, String.class)));
    assertTrue(CollectionUtils.isEmpty(CastUtils.getPriorityQueue(null, String.class)));
    assertTrue(CollectionUtils.isEmpty(CastUtils.getLinkedBlockingQueue(null, String.class)));
    assertTrue(CollectionUtils.isEmpty(CastUtils.getPriorityBlockingQueue(null, String.class)));
    assertTrue(CollectionUtils.isEmpty(CastUtils.getArrayBlockingQueue(null, String.class, queue.size())));

    Queue<String> result = CastUtils.getLinkedListAsQueue(queue, String.class);
    assertEquals("value1", result.poll());
    assertNull(result.poll());
    assertEquals("value2", result.poll());

    result = CastUtils.getLinkedTransferQueue(queue, String.class);
    assertEquals("value1", result.poll());
    assertEquals("value2", result.poll());

    result = CastUtils.getPriorityQueue(queue, String.class);
    assertEquals("value1", result.poll());
    assertEquals("value2", result.poll());

    result = CastUtils.getLinkedBlockingQueue(queue, String.class);
    assertEquals("value1", result.poll());
    assertEquals("value2", result.poll());

    result = CastUtils.getPriorityBlockingQueue(queue, String.class);
    assertEquals("value1", result.poll());
    assertEquals("value2", result.poll());

    result = CastUtils.getArrayBlockingQueue(queue, String.class, queue.size());
    assertEquals("value1", result.poll());
    assertEquals("value2", result.poll());

    assertEquals(0, CastUtils.getLinkedListAsQueue(12, String.class).size());

    Queue<Integer> queue2 = new LinkedList<>();
    queue2.add(2);
    assertEquals(0, CastUtils.getLinkedListAsQueue(queue2, String.class).size());
}
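
A side note on the null element in this test: LinkedList is one of the few Queue implementations whose add accepts null, which is why the null appears only in the LinkedList-backed result; PriorityQueue, ConcurrentLinkedQueue and the blocking queues do not permit null elements (presumably CastUtils skips them when converting to those types). A minimal sketch of that difference:

import java.util.LinkedList;
import java.util.PriorityQueue;
import java.util.Queue;

public class QueueNullDemo {
    public static void main(String[] args) {
        Queue<String> linked = new LinkedList<>();
        linked.add(null); // permitted: LinkedList allows null elements

        Queue<String> priority = new PriorityQueue<>();
        try {
            priority.add(null); // PriorityQueue rejects null
        } catch (NullPointerException e) {
            System.out.println("null elements are not allowed in a PriorityQueue");
        }
    }
}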

From source file:it.geosolutions.geobatch.imagemosaic.MetadataPresentationOnlineTest.java

protected RESTCoverage createAndRunAction(TestConfiguration testConfig) throws Exception {
    removeStore();

    File imcFile;

    //=== Add first set of granules
    LOGGER.info(" ***** CREATING FIRST BATCH OF GRANULES");
    {
        ImageMosaicCommand imc = recreateIMC("20121004", "20121005", "20121006", "20121007", "20121008");
        // serialize
        imcFile = new File(getTempDir(), "ImageMosaicCommand0.xml");
        LOGGER.info("Creating  " + imcFile);
        ImageMosaicCommand.serialize(imc, imcFile.toString());
    }

    {
        Queue<EventObject> inputQ = new LinkedList<EventObject>();
        inputQ.add(new FileSystemEvent(imcFile, FileSystemEventType.FILE_ADDED));
        ImageMosaicAction action = createMosaicAction(testConfig);
        Queue<EventObject> outputQ = action.execute(inputQ);
    }

    DataStore dataStore = createDatastore();
    assertEquals(5, dataStore.getFeatureSource(STORENAME).getCount(Query.ALL));

    GeoServerRESTReader reader = new GeoServerRESTReader(getFixture().getProperty("gs_url"),
            getFixture().getProperty("gs_user"), getFixture().getProperty("gs_password"));

    RESTCoverage coverageInfo = reader.getCoverage(WORKSPACE, STORENAME, STORENAME);

    //        removeStore();

    return coverageInfo;
}

From source file:com.thinkbiganalytics.metadata.jobrepo.nifi.provenance.NifiStatsJmsReceiver.java

private void addFeedProcessorError(NifiFeedProcessorErrors error) {
    Queue<NifiFeedProcessorErrors> q = feedProcessorErrors.getUnchecked(error.getFeedName());
    if (q != null) {
        q.add(error);
    }
}

From source file:org.sonar.server.db.migrations.v36.Referentials.java

private Queue<long[]> initGroupOfViolationIds(Database database) throws SQLException {
    Connection connection = database.getDataSource().getConnection();
    Statement stmt = null;
    ResultSet rs = null;
    try {
        connection.setAutoCommit(false);
        stmt = connection.createStatement();
        stmt.setFetchSize(10000);
        rs = stmt.executeQuery("select id from rule_failures");
        Queue<long[]> queue = new ConcurrentLinkedQueue<long[]>();

        totalViolations = 0;
        long[] block = new long[VIOLATION_GROUP_SIZE];
        int cursor = 0;
        while (rs.next()) {
            block[cursor] = rs.getLong(1);
            cursor++;
            totalViolations++;
            if (cursor == VIOLATION_GROUP_SIZE) {
                queue.add(block);
                block = new long[VIOLATION_GROUP_SIZE];
                cursor = 0;
            }
        }
        if (cursor > 0) {
            queue.add(block);
        }
        return queue;
    } finally {
        DbUtils.closeQuietly(connection, stmt, rs);
    }
}

From source file:org.unitime.timetable.solver.curricula.CurriculaCourseDemands.java

protected void computeTargetShare(int nrStudents, Collection<CurriculumCourse> courses,
        CurriculumCourseGroupsProvider course2groups, CurModel model) {
    for (CurriculumCourse c1 : courses) {
        float x1 = c1.getPercShare() * nrStudents;
        Set<CurriculumCourse>[] group = new HashSet[] { new HashSet<CurriculumCourse>(),
                new HashSet<CurriculumCourse>() };
        Queue<CurriculumCourse> queue = new LinkedList<CurriculumCourse>();
        queue.add(c1);
        Set<CurriculumCourseGroup> done = new HashSet<CurriculumCourseGroup>();
        while (!queue.isEmpty()) {
            CurriculumCourse c = queue.poll();
            for (CurriculumCourseGroup g : course2groups.getGroups(c))
                if (done.add(g))
                    for (CurriculumCourse x : courses)
                        if (!x.equals(c) && !x.equals(c1) && course2groups.getGroups(x).contains(g)
                                && group[group[0].contains(c) ? 0 : g.getType()].add(x))
                            queue.add(x);
        }
        for (CurriculumCourse c2 : courses) {
            float x2 = c2.getPercShare() * nrStudents;
            if (c1.getUniqueId() >= c2.getUniqueId())
                continue;
            float share = c1.getPercShare() * c2.getPercShare() * nrStudents;
            boolean opt = group[0].contains(c2);
            boolean req = !opt && group[1].contains(c2);
            model.setTargetShare(c1.getUniqueId(), c2.getUniqueId(), opt ? 0.0 : req ? Math.min(x1, x2) : share,
                    true);
        }
    }
}

From source file:edu.pitt.dbmi.deep.phe.i2b2.I2b2OntologyBuilder.java

private TreeSet<PartialPath> extractOntologyPartialPaths() throws OWLOntologyCreationException, IOException {

    final TreeSet<PartialPath> partialPaths = new TreeSet<PartialPath>();

    OWLOntologyManager m = OWLManager.createOWLOntologyManager();
    // OWLOntology o = m.loadOntologyFromOntologyDocument(pizza_iri);
    OWLOntology o = loadDeepPheOntology(m);
    OWLReasonerFactory reasonerFactory;
    reasonerFactory = new StructuralReasonerFactory();
    OWLReasoner reasoner = reasonerFactory.createReasoner(o);
    OWLDataFactory fac = m.getOWLDataFactory();
    OWLClass elementConcept = fac.getOWLClass(IRI.create(CONST_TOP_LEVEL_ENTRY));

    final Queue<PartialPath> partialPathQueue = new LinkedList<PartialPath>();
    NodeSet<OWLClass> subClses = reasoner.getSubClasses(elementConcept, true);
    for (Node<OWLClass> subCls : subClses) {
        PartialPath path = new PartialPath();
        path.setReasoner(reasoner);
        path.setCls(subCls.getRepresentativeElement());
        path.setLevel(1);
        partialPathQueue.add(path);
    }

    while (true) {
        PartialPath path;
        path = partialPathQueue.poll();
        if (path == null) {
            break;
        } else {
            partialPathQueue.addAll(path.expand());
        }
        partialPaths.add(path);
    }

    PartialPath topLevel = new PartialPath();
    topLevel.setPath("\\DEEPPHE");
    topLevel.setLevel(0);
    topLevel.setLeaf(false);
    partialPaths.add(topLevel);

    return partialPaths;
}
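
Both this example and the curricula example above rely on the same work-queue idiom: seed a Queue, then repeatedly poll an item, process it, and add any newly discovered items until the queue is empty. A minimal, hypothetical sketch of that breadth-first traversal over a generic tree of nodes:

import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import java.util.Queue;

class TreeNode<T> {
    T value;
    List<TreeNode<T>> children = new ArrayList<>();
}

class BreadthFirst {
    /** Visits nodes level by level, returning their values in breadth-first order. */
    static <T> List<T> traverse(TreeNode<T> root) {
        List<T> visited = new ArrayList<>();
        Queue<TreeNode<T>> queue = new LinkedList<>();
        queue.add(root);                  // seed the work queue
        while (!queue.isEmpty()) {
            TreeNode<T> node = queue.poll();
            visited.add(node.value);
            queue.addAll(node.children);  // enqueue newly discovered work
        }
        return visited;
    }
}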

From source file:com.android.volley.RequestQueue.java

/**
 * Adds a Request to the dispatch queue.
 * @param request The request to service
 * @return The passed-in request
 */
public <T> Request<T> add(Request<T> request) {
    // Tag the request as belonging to this queue and add it to the set of current requests.
    request.setRequestQueue(this);
    synchronized (mCurrentRequests) {
        mCurrentRequests.add(request);
    }

    // Process requests in the order they are added.
    request.setSequence(getSequenceNumber());
    request.addMarker("add-to-queue");

    // If the request is uncacheable, skip the cache queue and go straight to the network.
    if (!request.shouldCache()) {
        mNetworkQueue.add(request);
        return request;
    }

    // Insert request into stage if there's already a request with the same cache key in flight.
    synchronized (mWaitingRequests) {
        String cacheKey = request.getCacheKey();
        if (mWaitingRequests.containsKey(cacheKey)) {
            // There is already a request in flight. Queue up.
            Queue<Request<?>> stagedRequests = mWaitingRequests.get(cacheKey);
            if (stagedRequests == null) {
                stagedRequests = new LinkedList<Request<?>>();
            }
            stagedRequests.add(request);
            mWaitingRequests.put(cacheKey, stagedRequests);
            if (VolleyLog.DEBUG) {
                VolleyLog.v("Request for cacheKey=%s is in flight, putting on hold.", cacheKey);
            }
        } else {
            // Insert 'null' queue for this cacheKey, indicating there is now a request in
            // flight.
            mWaitingRequests.put(cacheKey, null);
            mCacheQueue.add(request);
        }
        return request;
    }
}

From source file:org.apache.predictionio.examples.java.recommendations.tutorial1.Algorithm.java

private void setTopItemSimilarity(Map<Integer, Queue<IndexAndScore>> topItemSimilarity, Integer itemID1,
        Integer index2, double score, int capacity, Comparator<IndexAndScore> comparator) {
    Queue<IndexAndScore> queue = topItemSimilarity.get(itemID1);
    if (queue == null) {
        queue = new PriorityQueue<IndexAndScore>(capacity, comparator);
        topItemSimilarity.put(itemID1, queue);
    }
    IndexAndScore entry = new IndexAndScore(index2, score);
    if (queue.size() < capacity)
        queue.add(entry);
    else if (comparator.compare(queue.peek(), entry) < 0) {
        queue.poll();
        queue.add(entry);
    }
}
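
The example above keeps only the top capacity entries per item: while the queue is below capacity every entry is added, and afterwards a new entry replaces the head only if the comparator ranks it higher. The hypothetical sketch below distills that bounded top-K idiom for plain double values, using a min-heap so the queue head is always the smallest value kept.

import java.util.PriorityQueue;
import java.util.Queue;

public class TopK {

    /** Returns the k largest values seen in the input (in heap order, not sorted). */
    public static Queue<Double> topK(double[] values, int k) {
        Queue<Double> queue = new PriorityQueue<>(k); // natural ordering: a min-heap
        for (double v : values) {
            if (queue.size() < k) {
                queue.add(v);
            } else if (queue.peek() < v) {
                queue.poll(); // drop the current smallest kept value
                queue.add(v); // keep the larger one
            }
        }
        return queue;
    }
}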