Example usage for java.util Queue add

List of usage examples for java.util Queue add

Introduction

On this page you can find example usage for java.util Queue add.

Prototype

boolean add(E e);

Document

Inserts the specified element into this queue if it is possible to do so immediately without violating capacity restrictions, returning true upon success and throwing an IllegalStateException if no space is currently available.
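
A minimal self-contained sketch of this contract: on a capacity-bounded implementation such as ArrayBlockingQueue, add throws IllegalStateException when the queue is full, whereas offer reports the same condition with a false return value. (On unbounded queues such as LinkedList, add always succeeds and returns true.)

import java.util.Queue;
import java.util.concurrent.ArrayBlockingQueue;

public class QueueAddDemo {
    public static void main(String[] args) {
        // ArrayBlockingQueue is capacity-bounded, so add(E) can actually fail.
        Queue<String> queue = new ArrayBlockingQueue<>(2);
        queue.add("a");                       // returns true, element inserted
        queue.add("b");                       // returns true, element inserted
        System.out.println(queue.offer("c")); // false: queue full, no exception
        try {
            queue.add("c");                   // queue full: throws instead
        } catch (IllegalStateException e) {
            System.out.println("add failed: " + e);
        }
    }
}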

Usage

From source file:org.protempa.backend.ksb.protege.ProtegeKnowledgeSourceBackend.java

private Collection<String> collectPropDescendantsInt(boolean inDataSourceOnly, boolean narrower,
        String[] propIds) throws KnowledgeSourceReadException {
    assert propIds != null : "propIds cannot be null";
    Slot inverseIsASlot = this.cm.getSlot("inverseIsA");
    Slot abstractedFromSlot = this.cm.getSlot("abstractedFrom");
    Slot inDataSourceSlot = this.cm.getSlot("inDataSource");
    Set<String> result = new HashSet<>();
    Queue<Instance> queue = new LinkedList<>();
    for (String propId : propIds) {
        Instance instance = this.cm.getInstance(propId);
        if (instance == null) {
            throw new KnowledgeSourceReadException("unknown proposition id " + propId);
        } else {
            queue.add(instance);
        }
    }
    while (!queue.isEmpty()) {
        Instance instance = queue.poll();
        if (inDataSourceOnly) {
            Boolean inDataSource = (Boolean) this.cm.getOwnSlotValue(instance, inDataSourceSlot);
            if (inDataSource != null && inDataSource.booleanValue()) {
                result.add(instance.getName());
            }
        } else {
            result.add(instance.getName());
        }
        Collection<?> inverseIsAs = this.cm.getOwnSlotValues(instance, inverseIsASlot);
        for (Object obj : inverseIsAs) {
            queue.add((Instance) obj);
        }
        Collection<?> abstractedFroms = narrower ? this.cm.getOwnSlotValues(instance, abstractedFromSlot)
                : Collections.emptyList();
        for (Object obj : abstractedFroms) {
            queue.add((Instance) obj);
        }
    }
    return result;
}
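
The example above is a textbook breadth-first traversal: the queue is seeded with add, then drained with poll while newly discovered neighbors are added back in. Note that it relies on the ontology graph being acyclic, since nodes are never checked against a visited set before being re-enqueued. A stripped-down sketch of the same idiom with an explicit cycle guard (the Node type here is hypothetical):

import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Queue;
import java.util.Set;

public class BfsSketch {
    // Hypothetical node type, for illustration only.
    record Node(String name, List<Node> children) {}

    static Set<String> collectNames(List<Node> roots) {
        Set<String> visited = new HashSet<>();
        Queue<Node> queue = new LinkedList<>(roots); // seed the traversal
        while (!queue.isEmpty()) {
            Node node = queue.poll();
            if (visited.add(node.name())) {          // skip already-seen nodes
                for (Node child : node.children()) {
                    queue.add(child);                // enqueue neighbors
                }
            }
        }
        return visited;
    }
}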

From source file:gov.nih.nci.grididloader.BigIdCreator.java

/**
 * Create Big Id's for each entity and save them into the database.
 * Each entity is updated in parallel by several threads, but the entities
 * are processed in a serial fashion.
 */
public void createAndUpdate() throws Exception {

    if (hiFactory.getSystemType() == HandleInterfaceType.CLASSIC) {
        // Create site handle, if the database is empty.
        // This is necessary because otherwise 50 threads will try to create it
        // at once, resulting in duplicates and a subsequent avalanche of collisions
        final HandleRepositoryIDInterface idSvc = (HandleRepositoryIDInterface) hiFactory.getHandleInterface();
        // create dummy id (also creates site handle)
        ResourceIdInfo rid = new ResourceIdInfo(new URI("urn://ncicb"), "dummy");
        idSvc.createOrGetGlobalID(rid);
        // remove the id we created, the site handle will remain
        idSvc.removeGlobalID(rid);
    }

    Connection conn = null;
    FileWriter benchmarkFile = null;

    try {
        benchmarkFile = new FileWriter("timings.txt");
        conn = dataSource.getConnection();

        for (BigEntity entity : config.getEntities()) {

            final String className = entity.getClassName();
            if (!classFilter.isEmpty() && ((include && !classFilter.contains(className))
                    || (!include && classFilter.contains(className)))) {
                System.err.println("Filtered out " + className);
                continue;
            }

            long start = System.currentTimeMillis();

            final String table = entity.getTableName();
            final String id = entity.getPrimaryKey();

            Statement stmt = null;
            ResultSet rs = null;
            long numRows = 0;
            long minId = 0;
            long maxId = 0;

            try {
                // get number of rows and id space for the current entity
                stmt = conn.createStatement();
                rs = stmt.executeQuery(
                        "SELECT MIN(" + id + ") minId, MAX(" + id + ") maxId, COUNT(*) rowCount FROM " + table);
                rs.next();
                numRows = rs.getLong("rowCount");
                minId = rs.getLong("minId");
                maxId = rs.getLong("maxId");
            } catch (SQLException e) {
                System.err.println("Error processing " + table);
                e.printStackTrace();
                continue;
            } finally {
                try {
                    if (rs != null)
                        rs.close();
                    if (stmt != null)
                        stmt.close();
                } catch (SQLException e) {
                    e.printStackTrace();
                }
            }

            /* This is an overly complicated formula to figure out the best 
             * chunk size possible. 
             * 
             * First we determine the idealChunkSize for the amount of rows
             * we are dealing with, based on a linear step equation:
             *10000|   ______
             * 9500|   :
             *     |  /:
             *     | / :
             * 500 |/  :
             * ____|___:_____
             *     0   500,000
             *          
             * In other words, the minimum chunk is 500. As the number of rows 
             * increases, the chunk size grows up to 9500. But after 500000 
             * rows, the chunk size jumps to 10000 and stays constant so that 
             * we don't overload each thread. Therefore, the chunk size is 
             * always between 500 and 10000. 
             * 
             * Secondly, the identifier spread is calculated and multiplied by 
             * the idealChunkSize to get the final chunkSize. If the ids are 
             * equal to the row numbers, the spread is 1 and the chunk size is 
             * ok. If, however, the id space is gigantic, then the chunk size 
             * will be increased proportionally to the average distance between
             * ids (assuming the ids are uniformly distributed).
             *  
             * This actually works perfectly only if the ids ARE uniformly
             * distributed. In other corner cases, where the ids are clustered
             * together within a huge id space, the id space must be
             * partitioned recursively. 
             */
            final float idealChunkSize = (numRows > 500000) ? 10000 : .018f * numRows + 500;
            final float spread = (float) (maxId - minId + 1) / (float) numRows;
            final long chunkSize = Math.round(idealChunkSize * spread);

            System.out.println("Processing " + entity + " (" + entity.getTableName() + ") rows(" + numRows
                    + ") range(" + minId + "," + maxId + ") parallel(" + entity.isParallelLoadable() + ")");
            System.out.println("Parameters: spread(" + spread + ") chunkSize(ideal=" + idealChunkSize
                    + " actual=" + chunkSize + ")");

            final Map<BatchUpdate, Future<Boolean>> futures = new HashMap<BatchUpdate, Future<Boolean>>();
            final Queue<BatchUpdate> updates = new LinkedList<BatchUpdate>();

            // start each chunk as a task on the executor
            for (long i = minId; i <= maxId; i += chunkSize) {
                BatchUpdate update = new BatchUpdate(dataSource, hiFactory, entity, i, i + chunkSize - 1);
                updates.add(update);

                Future<Boolean> future = entity.isParallelLoadable() ? parallelExecutor.submit(update)
                        : serialExecutor.submit(update);

                futures.put(update, future);
            }

            // wait for all updates to finish
            while (!updates.isEmpty()) {
                final BatchUpdate update = updates.remove();
                final Future<Boolean> future = futures.remove(update);
                try {
                    // this get() blocks until the future is available
                    Boolean success = future.get();
                    if (success == null || !success.booleanValue()) {
                        System.err.println("FAILED: " + update);
                    } else {
                        int n = update.getNumUpdated();
                        if (n == 0) {
                            System.out.println("  done " + update + " (no rows found)");
                        } else {
                            int ut = (int) update.getAverageUpdateTime();
                            int ht = (int) update.getAverageHandleTime();
                            System.out.println("  done " + update + " rows(" + n + " rows) avg(handle=" + ht
                                    + "ms, update=" + ut + "ms)");
                        }
                    }
                } catch (ExecutionException e) {
                    System.err.println("Updated failed for entity: " + entity);
                    e.printStackTrace();
                } catch (InterruptedException e) {
                    System.err.println("Updated failed for entity: " + entity);
                    e.printStackTrace();
                }
            }

            float time = System.currentTimeMillis() - start;
            System.out.println("Done " + entity + " (" + (time / 1000) + " sec)\n");
            benchmarkFile.write(entity.getClassName() + "\t" + numRows + "\t" + time + "\n");
            benchmarkFile.flush();
        }

    } finally {
        try {
            if (conn != null)
                conn.close();
            if (benchmarkFile != null)
                benchmarkFile.close();
        } catch (SQLException e) {
            e.printStackTrace();
        }
    }

    // Done 
    parallelExecutor.shutdown();
    serialExecutor.shutdown();
}
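
Here a LinkedList-backed Queue keeps the submitted BatchUpdate chunks in FIFO order so completions are reported in submission order, while the Map carries each chunk's Future. A slightly simpler arrangement with the same behavior queues the Futures themselves; a minimal sketch (not the project's API):

import java.util.LinkedList;
import java.util.Queue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class ChunkRunner {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        Queue<Future<Boolean>> futures = new LinkedList<>();
        for (int chunk = 0; chunk < 10; chunk++) {
            final int c = chunk;
            futures.add(pool.submit(() -> c % 2 == 0)); // stand-in for a BatchUpdate
        }
        while (!futures.isEmpty()) {
            // remove() takes chunks in submission order; get() blocks until done
            System.out.println(futures.remove().get() ? "done" : "FAILED");
        }
        pool.shutdown();
    }
}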

From source file:com.sishuok.chapter3.web.controller.chat.MsgPublisher.java

public DeferredResult<String> startAsync(final String username) {
    final DeferredResult<String> result = new DeferredResult<String>(30L * 1000, null);

    final Runnable removeDeferredResultRunnable = new Runnable() {
        @Override
        public void run() {
            Queue<DeferredResult<String>> queue = usernameToDeferredResultMap.get(username);
            if (queue != null) {
                queue.remove(result);
            }
        }
    };
    result.onCompletion(removeDeferredResultRunnable);
    result.onTimeout(removeDeferredResultRunnable);

    // look up the queue of deferred results for this user, creating it on first use
    Queue<DeferredResult<String>> queue = usernameToDeferredResultMap.get(username);
    if (queue == null) {
        queue = new ConcurrentLinkedDeque<>();
        usernameToDeferredResultMap.put(username, queue);
    }
    queue.add(result);

    return result;
}
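
One caveat: the get-then-put sequence above is not atomic, so two requests for the same user arriving at once can each create a queue, and results registered in the losing queue are silently dropped. If usernameToDeferredResultMap is a ConcurrentMap, computeIfAbsent closes that window; a minimal self-contained sketch:

import java.util.Queue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedDeque;
import java.util.concurrent.ConcurrentMap;

class UserQueues {
    private final ConcurrentMap<String, Queue<String>> byUser = new ConcurrentHashMap<>();

    void register(String username, String message) {
        // computeIfAbsent creates each queue at most once, even under contention
        byUser.computeIfAbsent(username, k -> new ConcurrentLinkedDeque<>()).add(message);
    }
}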

From source file:org.opencron.server.service.ExecuteService.java

public boolean killJob(Record record) {

    final Queue<Record> recordQueue = new LinkedBlockingQueue<Record>();

    // singleton job: only this record needs to be killed
    if (JobType.SINGLETON.getCode().equals(record.getJobType())) {
        recordQueue.add(record);
    } else if (JobType.FLOW.getCode().equals(record.getJobType())) {
        // flow job: kill every record still running in the flow
        recordQueue.addAll(recordService.getRunningFlowJob(record.getRecordId()));
    }

    final List<Boolean> result = new ArrayList<Boolean>(0);
    Thread jobThread = new Thread(new Runnable() {
        @Override
        public void run() {
            for (final Record cord : recordQueue) {
                // issue each kill request on its own thread
                Thread thread = new Thread(new Runnable() {
                    public void run() {
                        // mark the record as stopping and flag it as killed
                        cord.setStatus(RunStatus.STOPPING.getStatus());
                        cord.setSuccess(ResultStatus.KILLED.getStatus());
                        JobVo job = null;
                        try {
                            recordService.save(cord);
                            job = jobService.getJobVoById(cord.getJobId());
                            // ask the agent to kill the running process
                            opencronCaller.call(
                                    Request.request(job.getIp(), job.getPort(), Action.KILL, job.getPassword())
                                            .putParam("pid", cord.getPid()),
                                    job.getAgent());
                            cord.setStatus(RunStatus.STOPED.getStatus());
                            cord.setEndTime(new Date());
                            recordService.save(cord);
                            loggerInfo("killed successful :jobName:{} at ip:{},port:{},pid:{}", job,
                                    cord.getPid());
                        } catch (Exception e) {
                            if (e instanceof PacketTooBigException) {
                                noticeService.notice(job, PACKETTOOBIG_ERROR);
                                loggerError("killed error:jobName:%s at ip:%s,port:%d,pid:%s", job,
                                        cord.getPid() + " failed info: " + PACKETTOOBIG_ERROR, e);
                            }
                            noticeService.notice(job, null);
                            loggerError("killed error:jobName:%s at ip:%s,port:%d,pid:%s", job,
                                    cord.getPid() + " failed info: " + e.getMessage(), e);
                            result.add(false);
                        }
                    }
                });
                thread.start();
            }
        }
    });
    jobThread.start();

    // wait for the dispatcher thread to finish submitting kill requests
    try {
        jobThread.join();
    } catch (InterruptedException e) {
        logger.error("[opencron] kill job with error:{}", e.getMessage());
    }
    return !result.contains(false);
}

From source file:org.glassfish.jersey.examples.sseitemstore.jaxrs.JaxrsItemStoreResourceTest.java

/**
 * Test the item addition, addition event broadcasting and item retrieval from {@link JaxrsItemStoreResource}.
 *
 * @throws Exception in case of a test failure.
 */
@Test
public void testItemsStore() throws Exception {
    final List<String> items = Collections.unmodifiableList(Arrays.asList("foo", "bar", "baz"));
    final WebTarget itemsTarget = target("items");
    final CountDownLatch latch = new CountDownLatch(items.size() * MAX_LISTENERS * 2); // countdown on all events
    final List<Queue<Integer>> indexQueues = new ArrayList<>(MAX_LISTENERS);
    final SseEventSource[] sources = new SseEventSource[MAX_LISTENERS];
    final AtomicInteger sizeEventsCount = new AtomicInteger(0);

    for (int i = 0; i < MAX_LISTENERS; i++) {
        final int id = i;
        final SseEventSource es = SseEventSource.target(itemsTarget.path("events")).build();
        sources[id] = es;

        final Queue<Integer> indexes = new ConcurrentLinkedQueue<>();
        indexQueues.add(indexes);

        es.register(inboundEvent -> {
            try {
                if (null == inboundEvent.getName()) {
                    final String data = inboundEvent.readData();
                    LOGGER.info("[-i-] SOURCE " + id + ": Received event id=" + inboundEvent.getId() + " data="
                            + data);
                    indexes.add(items.indexOf(data));
                } else if ("size".equals(inboundEvent.getName())) {
                    sizeEventsCount.incrementAndGet();
                }
            } catch (Exception ex) {
                LOGGER.log(Level.SEVERE, "[-x-] SOURCE " + id + ": Error getting event data.", ex);
                indexes.add(-999);
            } finally {
                latch.countDown();
            }
        });
    }

    try {
        open(sources);
        items.forEach((item) -> postItem(itemsTarget, item));

        assertTrue("Waiting to receive all events has timed out.",
                latch.await((1000 + MAX_LISTENERS * RECONNECT_DEFAULT) * getAsyncTimeoutMultiplier(),
                        TimeUnit.MILLISECONDS));

        // need to force disconnect on server in order for EventSource.close(...) to succeed with HttpUrlConnection
        sendCommand(itemsTarget, "disconnect");
    } finally {
        close(sources);
    }

    String postedItems = itemsTarget.request().get(String.class);
    items.forEach(
            (item) -> assertTrue("Item '" + item + "' not stored on server.", postedItems.contains(item)));

    final AtomicInteger queueId = new AtomicInteger(0);
    indexQueues.forEach((indexes) -> {
        for (int i = 0; i < items.size(); i++) {
            assertTrue("Event for '" + items.get(i) + "' not received in queue " + queueId.get(),
                    indexes.contains(i));
        }
        assertEquals("Not received the expected number of events in queue " + queueId.get(), items.size(),
                indexes.size());
        queueId.incrementAndGet();
    });

    assertEquals("Number of received 'size' events does not match.", items.size() * MAX_LISTENERS,
            sizeEventsCount.get());
}
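
Each listener records item indexes in its own ConcurrentLinkedQueue because the registered handler runs on the event source's delivery thread, concurrently with the test thread. On ConcurrentLinkedQueue, add is equivalent to offer: the queue is unbounded and lock-free, so add never blocks and never throws for capacity. A minimal sketch of concurrent producers:

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

public class ConcurrentAddDemo {
    public static void main(String[] args) throws InterruptedException {
        Queue<Integer> indexes = new ConcurrentLinkedQueue<>();
        Runnable producer = () -> {
            for (int i = 0; i < 1000; i++) {
                indexes.add(i); // thread-safe, non-blocking, never "full"
            }
        };
        Thread t1 = new Thread(producer);
        Thread t2 = new Thread(producer);
        t1.start();
        t2.start();
        t1.join();
        t2.join();
        System.out.println(indexes.size()); // 2000
    }
}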

From source file:org.onebusaway.uk.network_rail.gtfs_realtime.graph.PositionBerthToStanoxGraphMain.java

private void explore(Set<RawBerthNode> connections, RawStanoxNode stanoxNode) {
    Queue<OrderedRawBerthNode> queue = new PriorityQueue<OrderedRawBerthNode>();
    Set<RawBerthNode> visited = new HashSet<RawBerthNode>();
    int openCount = 0;
    for (RawBerthNode connection : connections) {
        queue.add(new OrderedRawBerthNode(connection, null, 0.0));
        openCount++;
    }

    Map<RawBerthNode, RawBerthNode> parents = new HashMap<RawBerthNode, RawBerthNode>();

    while (!queue.isEmpty()) {
        OrderedRawBerthNode currentNode = queue.poll();
        RawBerthNode node = currentNode.getNode();
        boolean isOpen = currentNode.isOpen();
        if (isOpen) {
            openCount--;
        } else if (openCount == 0) {
            return;
        }
        if (visited.contains(node)) {
            continue;
        }
        visited.add(node);
        parents.put(node, currentNode.getParent());
        Set<RawStanoxNode> stanoxes = node.getStanox();
        if (stanoxes.size() > 0 && !stanoxes.contains(stanoxNode)) {
            _log.info(node + " stanoxes=" + stanoxes + " " + currentNode.getDistance() + " open=" + openCount);
            RawBerthNode c = node;
            while (c != null) {
                _log.info("  " + c);
                c = parents.get(c);
            }
            isOpen = false;
        }
        for (Map.Entry<RawBerthNode, List<Integer>> entry : node.getOutgoing().entrySet()) {
            RawBerthNode outgoing = entry.getKey();
            int avgDuration = RawNode.average(entry.getValue());
            queue.add(new OrderedRawBerthNode(outgoing, node, currentNode.getDistance() + avgDuration, isOpen));
            if (isOpen) {
                openCount++;
            }
        }
    }
}
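
Unlike the FIFO queues in the other examples, a PriorityQueue orders its elements: add (equivalent to offer here) positions each OrderedRawBerthNode by its ordering rather than at the tail, so poll always hands back the node with the smallest distance, which is what makes the loop above a Dijkstra-style best-first search. It also means OrderedRawBerthNode must be Comparable (or the queue must be given a Comparator). In miniature:

import java.util.PriorityQueue;
import java.util.Queue;

public class PriorityOrderDemo {
    public static void main(String[] args) {
        // add() positions elements by ordering, not by arrival
        Queue<Integer> queue = new PriorityQueue<>();
        queue.add(5);
        queue.add(1);
        queue.add(3);
        while (!queue.isEmpty()) {
            System.out.print(queue.poll() + " "); // prints: 1 3 5
        }
    }
}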

From source file:eu.stratosphere.runtime.io.channels.InputChannel.java

@Override
public void destroy() {
    final Queue<Buffer> buffersToRecycle = new ArrayDeque<Buffer>();

    synchronized (this.queuedEnvelopes) {
        this.destroyCalled = true;

        while (!this.queuedEnvelopes.isEmpty()) {
            final Envelope envelope = this.queuedEnvelopes.poll();
            if (envelope.getBuffer() != null) {
                buffersToRecycle.add(envelope.getBuffer());
            }
        }
    }

    while (!buffersToRecycle.isEmpty()) {
        buffersToRecycle.poll().recycleBuffer();
    }
}
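
The two-phase drain here is deliberate: buffers are moved into a local queue while holding the lock on queuedEnvelopes, but the potentially slower recycling happens only after the lock is released, keeping the critical section short. The same shape in miniature, with a hypothetical release step standing in for recycleBuffer:

import java.util.ArrayDeque;
import java.util.LinkedList;
import java.util.Queue;

class Drainer {
    private final Queue<String> pending = new LinkedList<>();

    void drain() {
        Queue<String> toRelease = new ArrayDeque<>();
        synchronized (pending) {
            while (!pending.isEmpty()) {
                toRelease.add(pending.poll()); // move out under the lock
            }
        }
        while (!toRelease.isEmpty()) {
            release(toRelease.poll());         // expensive work outside the lock
        }
    }

    private void release(String resource) {    // hypothetical stand-in
        System.out.println("released " + resource);
    }
}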

From source file:org.glassfish.jersey.examples.sseitemstore.jersey.JerseyItemStoreResourceTest.java

/**
 * Test the item addition, addition event broadcasting and item retrieval from {@link ItemStoreResource}.
 *
 * @throws Exception in case of a test failure.
 */
@Test
public void testItemsStore() throws Exception {
    final List<String> items = Collections.unmodifiableList(Arrays.asList("foo", "bar", "baz"));
    final WebTarget itemsTarget = target("items");
    final CountDownLatch latch = new CountDownLatch(items.size() * MAX_LISTENERS * 2); // countdown on all events
    final List<Queue<Integer>> indexQueues = new ArrayList<>(MAX_LISTENERS);
    final EventSource[] sources = new EventSource[MAX_LISTENERS];
    final AtomicInteger sizeEventsCount = new AtomicInteger(0);

    for (int i = 0; i < MAX_LISTENERS; i++) {
        final int id = i;
        final EventSource es = EventSource.target(itemsTarget.path("events")).named("SOURCE " + id).build();
        sources[id] = es;

        final Queue<Integer> indexes = new ConcurrentLinkedQueue<>();
        indexQueues.add(indexes);

        es.register(inboundEvent -> {
            try {
                if (inboundEvent.getName() == null) {
                    final String data = inboundEvent.readData();
                    LOGGER.info("[-i-] SOURCE " + id + ": Received event id=" + inboundEvent.getId() + " data="
                            + data);
                    indexes.add(items.indexOf(data));
                } else if ("size".equals(inboundEvent.getName())) {
                    sizeEventsCount.incrementAndGet();
                }
            } catch (Exception ex) {
                LOGGER.log(Level.SEVERE, "[-x-] SOURCE " + id + ": Error getting event data.", ex);
                indexes.add(-999);
            } finally {
                latch.countDown();
            }
        });
    }

    try {
        open(sources);

        for (String item : items) {
            postItem(itemsTarget, item);
        }

        assertTrue("Waiting to receive all events has timed out.",
                latch.await(
                        (1000 + MAX_LISTENERS * EventSource.RECONNECT_DEFAULT) * getAsyncTimeoutMultiplier(),
                        TimeUnit.MILLISECONDS));

        // need to force disconnect on server in order for EventSource.close(...) to succeed with HttpUrlConnection
        sendCommand(itemsTarget, "disconnect");
    } finally {
        close(sources);
    }

    String postedItems = itemsTarget.request().get(String.class);
    for (String item : items) {
        assertTrue("Item '" + item + "' not stored on server.", postedItems.contains(item));
    }

    int queueId = 0;
    for (Queue<Integer> indexes : indexQueues) {
        for (int i = 0; i < items.size(); i++) {
            assertTrue("Event for '" + items.get(i) + "' not received in queue " + queueId,
                    indexes.contains(i));
        }
        assertEquals("Not received the expected number of events in queue " + queueId, items.size(),
                indexes.size());
        queueId++;
    }

    assertEquals("Number of received 'size' events does not match.", items.size() * MAX_LISTENERS,
            sizeEventsCount.get());
}

From source file:org.apache.kylin.job.hadoop.invertedindex.IITest.java

/**
 * simulate stream building into slices, and encode the slice into IIRows.
 * Then reconstruct the IIRows to slice.
 */
@Test
public void basicTest() {
    Queue<IIRow> buffer = Lists.newLinkedList();
    FIFOIterable bufferIterable = new FIFOIterable(buffer);
    TableRecordInfo info = new TableRecordInfo(iiDesc);
    TableRecordInfoDigest digest = info.getDigest();
    KeyValueCodec codec = new IIKeyValueCodecWithState(digest);
    Iterator<Slice> slices = codec.decodeKeyValue(bufferIterable).iterator();

    Assert.assertTrue(!slices.hasNext());
    Assert.assertEquals(iiRows.size(), digest.getColumnCount());

    for (int i = 0; i < digest.getColumnCount(); ++i) {
        buffer.add(iiRows.get(i));

        if (i != digest.getColumnCount() - 1) {
            Assert.assertTrue(!slices.hasNext());
        } else {
            Assert.assertTrue(slices.hasNext());
        }
    }

    Slice newSlice = slices.next();
    Assert.assertEquals(newSlice.getLocalDictionaries()[0].getSize(), 2);
}

From source file:org.lunarray.model.descriptor.scanner.AnnotationScannerUtil.java

/**
 * Transitively tests whether the marker annotation marks any of the given annotations.
 * 
 * @param marker
 *            The marker.
 * @param processed
 *            The processed annotations.
 * @param process
 *            The annotations to process.
 * @return True if and only if the marker marks any of the processed
 *         annotations.
 */
private boolean isMarkedTransiviteProcess(final Class<? extends Annotation> marker,
        final Set<Class<? extends Annotation>> processed, final Queue<Class<? extends Annotation>> process) {
    boolean marked = false;
    while (!process.isEmpty()) {
        final Class<? extends Annotation> poll = process.poll();
        processed.add(poll);
        if (poll.equals(marker)) {
            marked = true;
            process.clear();
        } else {
            for (final Annotation annotation : poll.getAnnotations()) {
                final Class<? extends Annotation> annotationType = annotation.annotationType();
                if (!processed.contains(annotationType)) {
                    process.add(annotationType);
                }
            }
        }
    }
    return marked;
}
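
The method is a worklist algorithm: process starts with the directly attached annotations, and each add feeds newly discovered meta-annotations back into the loop until either the marker is found or the closure is exhausted. A self-contained sketch of the same idiom, runnable on its own:

import java.lang.annotation.Annotation;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.Queue;
import java.util.Set;

public class MarkerScan {
    @Retention(RetentionPolicy.RUNTIME) @interface Marker {}
    @Retention(RetentionPolicy.RUNTIME) @Marker @interface Marked {}
    @Marked static class Subject {}

    static boolean isMarked(Class<?> type, Class<? extends Annotation> marker) {
        Set<Class<? extends Annotation>> processed = new HashSet<>();
        Queue<Class<? extends Annotation>> process = new LinkedList<>();
        for (Annotation a : type.getAnnotations()) {
            process.add(a.annotationType());         // seed with direct annotations
        }
        while (!process.isEmpty()) {
            Class<? extends Annotation> poll = process.poll();
            processed.add(poll);
            if (poll.equals(marker)) {
                return true;
            }
            for (Annotation a : poll.getAnnotations()) {
                if (!processed.contains(a.annotationType())) {
                    process.add(a.annotationType()); // enqueue meta-annotations
                }
            }
        }
        return false;
    }

    public static void main(String[] args) {
        System.out.println(isMarked(Subject.class, Marker.class)); // true
    }
}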