Example usage for java.util.concurrent BlockingQueue add

List of usage examples for java.util.concurrent BlockingQueue add

Introduction

On this page you can find example usage for java.util.concurrent BlockingQueue add.

Prototype

boolean add(E e);

Document

Inserts the specified element into this queue if it is possible to do so immediately without violating capacity restrictions, returning true upon success and throwing an IllegalStateException if no space is currently available.
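
Before the project examples below, here is a minimal, self-contained sketch of the behaviour described above: on a bounded queue, add throws an IllegalStateException when no space is available, while offer simply returns false. This sketch is not taken from any of the projects below; the class name BoundedAddDemo is illustrative only.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

public class BoundedAddDemo {
    public static void main(String[] args) {
        // Capacity of one, so the second insertion hits the capacity restriction.
        BlockingQueue<String> queue = new ArrayBlockingQueue<>(1);

        System.out.println(queue.add("first"));    // true: space was available
        System.out.println(queue.offer("second")); // false: offer reports failure instead of throwing

        try {
            queue.add("third");                    // add throws when the queue is full
        } catch (IllegalStateException e) {
            System.out.println("add failed: " + e.getMessage());
        }
    }
}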

Usage

From source file:any.servable.VsetServable.java

@Override
public void process(Message cmd, BlockingQueue<Message> outQueue) {
    if (cmd == null) {
        loopqueue = outQueue;
    } else {

        String filename = cmd.getContent();

        currentfile = new File(res, filename);

        logger.debug(currentfile.getAbsolutePath());
        FileOutputStream fw;
        try {
            fw = new FileOutputStream(currentfile);

            fw.write(cmd.getData());
            fw.flush();
            fw.close();

        } catch (IOException e) {
            e.printStackTrace();
            final Writer result = new StringWriter();
            final PrintWriter printWriter = new PrintWriter(result);
            e.printStackTrace(printWriter);
            outQueue.add(new Message("vset", "_error", "text/plain", "YES", result.toString().getBytes()));
        }

        if (cmd.getFullContent().contains("show=YES")) {
            Display display = Display.getDefault();
            display.syncExec(new Runnable() {
                public void run() {
                    URL pageUrl = null;
                    try {
                        pageUrl = currentfile.toURI().toURL();
                        browser.setUrl(pageUrl.toString());
                        blockpage = false;
                    } catch (MalformedURLException e) {
                        // TODO Auto-generated catch block
                        e.printStackTrace();
                    }
                }
            });
        }
    }
}

From source file:coral.RessyncServable.java

/**
 * check if the file needs to be updated, if yes, send out a get request
 */
@Override
public void process(Message cmd, BlockingQueue<Message> outQueue) {
    if (cmd == null) {
        //         queue = outQueue;
    } else {

        String filename = cmd.getContent();

        if (logger.isDebugEnabled()) {
            logger.debug("check  " + filename + " - " + cmd.getFullContent());
        }

        Map<String, String> q = CoralUtils.urlToMap(cmd.getFullContent());

        if (logger.isDebugEnabled()) {
            logger.debug("check map " + q.toString());
        }

        final File file = new File(res, filename);

        logger.debug("check file " + file.getAbsolutePath());

        boolean request = false;
        if (file.exists()) {
            long version = file.lastModified();

            long expectedVersion = Long.parseLong(q.get("version"));

            if (expectedVersion > version) {
                request = true;
            }
        } else {
            request = true;
        }

        if (request) {
            outQueue.add(new Message(q.get("synccmd"), new byte[] {}));
        }
    }
}

From source file:com.facebook.LinkBench.LinkBenchDriver.java

private void enqueueLoadWork(BlockingQueue<LoadChunk> chunk_q, long startid1, long maxid1, int nloaders,
        Random rng) {
    // Enqueue work chunks.  Do it in reverse order as a heuristic to improve
    // load balancing, since queue is FIFO and later chunks tend to be larger

    int chunkSize = ConfigUtil.getInt(props, Config.LOADER_CHUNK_SIZE, 2048);
    long chunk_num = 0;
    ArrayList<LoadChunk> stack = new ArrayList<LoadChunk>();
    for (long id1 = startid1; id1 < maxid1; id1 += chunkSize) {
        stack.add(new LoadChunk(chunk_num, id1, Math.min(id1 + chunkSize, maxid1), rng));
        chunk_num++;
    }

    for (int i = stack.size() - 1; i >= 0; i--) {
        chunk_q.add(stack.get(i));
    }

    for (int i = 0; i < nloaders; i++) {
        // Add a shutdown signal for each loader
        chunk_q.add(LoadChunk.SHUTDOWN);
    }
}

From source file:com.google.dart.compiler.metrics.Tracer.java

private BlockingQueue<TraceEvent> openLogWriter(final Writer writer, final String fileName) {
    try {
        if (outputFormat.equals(Format.HTML)) {
            writer.write("<HTML isdump=\"true\"><body>"
                    + "<style>body {font-family:Helvetica; margin-left:15px;}</style>"
                    + "<h2>Performance dump from GWT</h2>"
                    + "<div>This file contains data that can be viewed with the "
                    + "<a href=\"http://code.google.com/speedtracer\">SpeedTracer</a> "
                    + "extension under the <a href=\"http://chrome.google.com/\">"
                    + "Chrome</a> browser.</div><p><span id=\"info\">"
                    + "(You must install the SpeedTracer extension to open this file)</span></p>"
                    + "<div style=\"display: none\" id=\"traceData\" version=\"0.17\">\n");
        }
    } catch (IOException e) {
        System.err
                .println("Unable to write to dart.speedtracerlog '" + (fileName == null ? "" : fileName) + "'");
        e.printStackTrace();
        return null;
    }

    final BlockingQueue<TraceEvent> eventQueue = new LinkedBlockingQueue<TraceEvent>();

    Runtime.getRuntime().addShutdownHook(new Thread() {
        @Override
        public void run() {
            try {
                // Wait for the other thread to drain the queue.
                eventQueue.add(shutDownSentinel);
                shutDownLatch.await();
            } catch (InterruptedException e) {
                // Ignored
            }
        }
    });

    // Background thread to write SpeedTracer events to log
    Thread logWriterWorker = new LogWriterThread(writer, fileName, eventQueue);

    // Lower than normal priority.
    logWriterWorker.setPriority((Thread.MIN_PRIORITY + Thread.NORM_PRIORITY) / 2);

    /*
     * This thread must be daemon, otherwise shutdown hooks would never begin to
     * run, and an app wouldn't finish.
     */
    logWriterWorker.setDaemon(true);
    logWriterWorker.setName("SpeedTracerLogger writer");
    logWriterWorker.start();
    return eventQueue;
}

From source file:org.apache.falcon.service.FeedSLAMonitoringService.java

@SuppressWarnings("unchecked")
private void deserialize(Path path) throws FalconException {
    try {
        Map<String, Object> state = deserializeInternal(path);
        pendingInstances = new ConcurrentHashMap<>();
        Map<Pair<String, String>, BlockingQueue<Date>> pendingInstancesCopy = (Map<Pair<String, String>, BlockingQueue<Date>>) state
                .get("pendingInstances");
        // queue size can change during restarts, hence copy
        for (Map.Entry<Pair<String, String>, BlockingQueue<Date>> entry : pendingInstancesCopy.entrySet()) {
            BlockingQueue<Date> value = new LinkedBlockingQueue<>(queueSize);
            BlockingQueue<Date> oldValue = entry.getValue();
            LOG.debug("Number of old instances:{}, new queue size:{}", oldValue.size(), queueSize);
            while (!oldValue.isEmpty()) {
                Date instance = oldValue.remove();
                if (value.size() == queueSize) { // if full
                    LOG.debug("Deserialization: Removing value={} for <feed,cluster>={}", value.peek(),
                            entry.getKey());
                    value.remove();
                }
                LOG.debug("Deserialization Adding: key={} to <feed,cluster>={}", entry.getKey(), instance);
                value.add(instance);
            }
            pendingInstances.put(entry.getKey(), value);
        }
        lastCheckedAt = new Date((Long) state.get("lastCheckedAt"));
        lastSerializedAt = new Date((Long) state.get("lastSerializedAt"));
        monitoredFeeds = new ConcurrentHashSet<>(); // will be populated on the onLoad of entities.
        LOG.debug("Restored the service from old state.");
    } catch (IOException | ClassNotFoundException e) {
        throw new FalconException("Couldn't deserialize the old state", e);
    }
}

From source file:org.apache.usergrid.tools.ExportAdmins.java

/**
 * Export admin users using multiple threads.
 * <p/>
 * How it works:
 * In main thread we query for IDs of all admin users, add each ID to read queue.
 * Read-queue workers read admin user data, add data to write queue.
 * One write-queue worker reads data writes to file.
 */
@Override
public void runTool(CommandLine line) throws Exception {
    startSpring();

    setVerbose(line);

    applyOrgId(line);
    prepareBaseOutputFileName(line);
    outputDir = createOutputParentDir();
    logger.info("Export directory: " + outputDir.getAbsolutePath());

    if (StringUtils.isNotEmpty(line.getOptionValue(READ_THREAD_COUNT))) {
        try {
            readThreadCount = Integer.parseInt(line.getOptionValue(READ_THREAD_COUNT));
        } catch (NumberFormatException nfe) {
            logger.error("-" + READ_THREAD_COUNT + " must be specified as an integer. Aborting...");
            return;
        }
    } else {
        readThreadCount = 20;
    }

    buildOrgMap();

    // start write queue worker

    BlockingQueue<AdminUserWriteTask> writeQueue = new LinkedBlockingQueue<AdminUserWriteTask>();
    AdminUserWriter adminUserWriter = new AdminUserWriter(writeQueue);
    Thread writeThread = new Thread(adminUserWriter);
    writeThread.start();
    logger.debug("Write thread started");

    // start read queue workers

    BlockingQueue<UUID> readQueue = new LinkedBlockingQueue<UUID>();
    for (int i = 0; i < readThreadCount; i++) {
        AdminUserReader worker = new AdminUserReader(readQueue, writeQueue);
        Thread readerThread = new Thread(worker, "AdminUserReader-" + i);
        readerThread.start();
    }
    logger.debug(readThreadCount + " read worker threads started");

    // query for IDs, add each to read queue

    Query query = new Query();
    query.setLimit(MAX_ENTITY_FETCH);
    query.setResultsLevel(Query.Level.IDS);
    EntityManager em = emf.getEntityManager(CpNamingUtils.MANAGEMENT_APPLICATION_ID);
    Results ids = em.searchCollection(em.getApplicationRef(), "users", query);

    while (ids.size() > 0) {
        for (UUID uuid : ids.getIds()) {
            readQueue.add(uuid);
            //logger.debug( "Added uuid to readQueue: " + uuid );
        }
        if (ids.getCursor() == null) {
            break;
        }
        query.setCursor(ids.getCursor());
        ids = em.searchCollection(em.getApplicationRef(), "users", query);
    }

    logger.debug("Waiting for write thread to complete");

    boolean done = false;
    while (!done) {
        writeThread.join(10000, 0);
        done = !writeThread.isAlive();
        logger.info("Wrote {} users", userCount.get());
    }
}
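
The same producer/worker shape appears in the LinkBenchDriver and ExportAdmins examples above: a producer calls add for each work item and then adds one shutdown sentinel per worker thread so every worker knows when to stop. Below is a minimal, self-contained sketch of that pattern, not taken from either project; the names WorkerPoolDemo and POISON are illustrative only.

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class WorkerPoolDemo {
    // Sentinel object signalling "no more work"; compared by identity.
    private static final String POISON = new String("POISON");

    public static void main(String[] args) throws InterruptedException {
        BlockingQueue<String> workQueue = new LinkedBlockingQueue<>();
        int workerCount = 3;

        Thread[] workers = new Thread[workerCount];
        for (int i = 0; i < workerCount; i++) {
            workers[i] = new Thread(() -> {
                try {
                    while (true) {
                        String item = workQueue.take();
                        if (item == POISON) {
                            return; // shutdown signal for this worker
                        }
                        System.out.println(Thread.currentThread().getName() + " processed " + item);
                    }
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            }, "worker-" + i);
            workers[i].start();
        }

        // Producer side: add the work items, then one sentinel per worker.
        for (int i = 0; i < 10; i++) {
            workQueue.add("item-" + i);
        }
        for (int i = 0; i < workerCount; i++) {
            workQueue.add(POISON);
        }

        for (Thread worker : workers) {
            worker.join();
        }
    }
}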

From source file:io.orchestrate.client.itest.KvTest.java

@Theory
public void deleteKeyAsync(@ForAll(sampleSize = 10) final String key) throws InterruptedException {
    assumeThat(key, not(isEmptyString()));

    final BlockingQueue<Boolean> queue = DataStructures.getLTQInstance(Boolean.class);
    client.kv(collection(), key).delete().on(new ResponseAdapter<Boolean>() {
        @Override
        public void onFailure(final Throwable error) {
            fail(error.getMessage());
        }

        @Override
        public void onSuccess(final Boolean object) {
            queue.add(object);
        }
    });

    final Boolean result = queue.poll(5000, TimeUnit.MILLISECONDS);
    assertTrue(result);
}

From source file:io.orchestrate.client.itest.KvTest.java

@Theory
public void getKeyWithInvalidApiKey(@ForAll(sampleSize = 2) final String key) throws InterruptedException {
    assumeThat(key, not(isEmptyString()));

    String badKey = "12345678-1234-1234-1234-1234567890123";
    Client badClient = OrchestrateClient.builder(badKey).build();

    final BlockingQueue<Throwable> failureQueue = DataStructures.getLTQInstance(Throwable.class);

    try {
        final KvObject<String> object = badClient.kv(collection(), key).get(String.class)
                .on(new ResponseListener<KvObject<String>>() {
                    @Override
                    public void onFailure(Throwable error) {
                        failureQueue.add(error);
                    }

                    @Override
                    public void onSuccess(KvObject<String> object) {
                    }
                }).get();
        fail("Should have thrown InvalidApiKeyException on 'get()'.");
    } catch (InvalidApiKeyException ex) {
    }
    @SuppressWarnings("unchecked")
    final Throwable failure = failureQueue.poll(5000, TimeUnit.MILLISECONDS);
    assertTrue(failure instanceof InvalidApiKeyException);
}

From source file:io.orchestrate.client.itest.KvTest.java

@Test
public void postValueAsync() throws InterruptedException, IOException {
    final String collection = collection();

    final BlockingQueue<KvMetadata> queue = DataStructures.getLTQInstance(KvMetadata.class);
    client.postValue(collection, "{}").on(new ResponseAdapter<KvMetadata>() {
        @Override
        public void onFailure(final Throwable error) {
            fail(error.getMessage());
        }

        @Override
        public void onSuccess(final KvMetadata object) {
            queue.add(object);
        }
    });
    final KvMetadata kvMetadata = queue.poll(5000, TimeUnit.MILLISECONDS);

    assertNotNull(kvMetadata);
    assertNotNull(kvMetadata.getKey());
    assertEquals(collection, kvMetadata.getCollection());
}

From source file:io.orchestrate.client.itest.KvTest.java

@Theory
public void putKeyAsync(@ForAll(sampleSize = 10) final String key) throws InterruptedException {
    assumeThat(key, not(isEmptyString()));

    final BlockingQueue<KvMetadata> queue = DataStructures.getLTQInstance(KvMetadata.class);
    client.kv(collection(), key).put("{}").on(new ResponseAdapter<KvMetadata>() {
        @Override
        public void onFailure(final Throwable error) {
            fail(error.getMessage());
        }

        @Override
        public void onSuccess(final KvMetadata object) {
            queue.add(object);
        }
    });

    final KvMetadata kvMetadata = queue.poll(5000, TimeUnit.MILLISECONDS);

    final KvObject<String> kvObject = client.kv(kvMetadata.getCollection(), kvMetadata.getKey())
            .get(String.class).get();

    assertNotNull(kvMetadata);
    assertNotNull(kvObject);
    assertEquals(kvMetadata.getCollection(), kvObject.getCollection());
    assertEquals(kvMetadata.getKey(), kvObject.getKey());
    assertEquals(kvMetadata.getRef(), kvObject.getRef());
    assertEquals("{}", kvObject.getValue());
}