Example usage for java.util.concurrent BlockingQueue put

List of usage examples for java.util.concurrent BlockingQueue put

Introduction

On this page you can find usage examples for java.util.concurrent BlockingQueue.put.

Prototype

void put(E e) throws InterruptedException;

Document

Inserts the specified element into this queue, waiting if necessary for space to become available.
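
Before the project examples below, here is a minimal self-contained sketch (not taken from any of the projects listed) that illustrates the blocking behavior of put on a bounded ArrayBlockingQueue. The class name PutExample and the item strings are only illustrative.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

public class PutExample {
    public static void main(String[] args) throws InterruptedException {
        // Bounded queue with capacity 2: put() blocks the producer once the queue is full.
        final BlockingQueue<String> queue = new ArrayBlockingQueue<>(2);

        Thread consumer = new Thread(() -> {
            try {
                while (true) {
                    String item = queue.take();        // blocks until an element is available
                    if ("POISON".equals(item)) {
                        break;                         // poison pill signals the end of input
                    }
                    System.out.println("consumed " + item);
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();    // restore the interrupt status
            }
        });
        consumer.start();

        for (int i = 0; i < 5; i++) {
            queue.put("item-" + i);                    // blocks while the queue already holds 2 elements
        }
        queue.put("POISON");                           // tell the consumer we are done
        consumer.join();
    }
}

Several of the examples below rely on the same two ideas: a bounded queue whose put calls apply back pressure to the producer, and a poison-pill element that shuts the consumer down cleanly.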

Usage

From source file: org.apache.hadoop.raid.JRSEncoder.java

protected void encodeFileToStream(FileSystem fs, Path srcFile, long srcSize, long blockSize, OutputStream out,
        Progressable reporter) throws IOException {
    // (disable) One parity block can be written directly to out, rest to local files.
    //tmpOuts[0] = out;

    //File[] tmpFiles = new File[paritySize];
    byte[][] bufs = new byte[paritySize][];

    /*
     * signal queue to trigger output
     * No need for a blocking queue (adjust in the future)
     */
    BlockingQueue<byte[]> closedBuf = new ArrayBlockingQueue<byte[]>(14);

    /*
     * Output thread
     */
    DataSender ds = new DataSender(closedBuf, out, blockSize, srcSize);
    Thread dst = new Thread(ds);
    dst.start();

    // Loop over stripes in the file.
    for (long stripeStart = 0; stripeStart < srcSize; stripeStart += blockSize * stripeSize) {
        reporter.progress();

        LOG.info("Starting encoding of stripe " + srcFile + ":" + stripeStart);

        /*
         * create temp file to write parity block (one file for each block)
         */
        for (int i = 0; i < paritySize; i++) {
            //tmpFiles[i] = File.createTempFile("parity", "_" + i); 
            //LOG.info("Created tmp file " + tmpFiles[i]);
            //tmpFiles[i].deleteOnExit();
            bufs[i] = new byte[(int) blockSize];
        }

        // Create input streams for blocks in the stripe.
        InputStream[] blocks = stripeInputs(fs, srcFile, stripeStart, srcSize, blockSize);

        /*
         * encode data
         */
        encodeStripe(blocks, stripeStart, blockSize, bufs, reporter);

        /*
         * trigger output
         */
        for (int i = 0; i < paritySize; i++) {
            try {
                closedBuf.put(bufs[i]);
            } catch (InterruptedException e) {
            }
            reporter.progress();
        }
    }

    try {
        //waiting for the end of output
        dst.join();
    } catch (InterruptedException e) {
        LOG.info("thread join interrupted");
    }
}

From source file: io.nats.client.ITClusterTest.java

@Test
public void testHotSpotReconnect() throws InterruptedException {
    int numClients = 100;
    ExecutorService executor = Executors.newFixedThreadPool(numClients,
            new NatsThreadFactory("testhotspotreconnect"));

    final BlockingQueue<String> rch = new LinkedBlockingQueue<String>();
    final BlockingQueue<Integer> dch = new LinkedBlockingQueue<Integer>();
    final AtomicBoolean shutdown = new AtomicBoolean(false);
    try (NatsServer s1 = runServerOnPort(1222)) {
        try (NatsServer s2 = runServerOnPort(1224)) {
            try (NatsServer s3 = runServerOnPort(1226)) {

                final class NATSClient implements Runnable {
                    Connection nc = null;
                    final AtomicInteger numReconnects = new AtomicInteger(0);
                    final AtomicInteger numDisconnects = new AtomicInteger(0);
                    String currentUrl = null;
                    final AtomicInteger instance = new AtomicInteger(-1);

                    final Options opts;

                    NATSClient(int inst) {
                        this.instance.set(inst);
                        opts = defaultOptions();
                        opts.servers = Nats.processUrlArray(testServers);

                        opts.disconnectedCb = new DisconnectedCallback() {
                            public void onDisconnect(ConnectionEvent event) {
                                numDisconnects.incrementAndGet();
                                try {
                                    dch.put(instance.get());
                                } catch (InterruptedException e) {
                                    e.printStackTrace();
                                }
                                nc.setDisconnectedCallback(null);
                            }
                        };
                        opts.reconnectedCb = new ReconnectedCallback() {
                            public void onReconnect(ConnectionEvent event) {
                                numReconnects.incrementAndGet();
                                currentUrl = nc.getConnectedUrl();
                                try {
                                    rch.put(currentUrl);
                                } catch (InterruptedException e) {
                                    e.printStackTrace();
                                }
                            }
                        };
                    }

                    @Override
                    public void run() {
                        try {
                            nc = opts.connect();
                            assertTrue(!nc.isClosed());
                            assertNotNull(nc.getConnectedUrl());
                            currentUrl = nc.getConnectedUrl();
                            // System.err.println("Instance " + instance + " connected to " +
                            // currentUrl);
                            while (!shutdown.get()) {
                                sleep(10);
                            }
                            nc.close();
                        } catch (IOException e) {
                            e.printStackTrace();
                        }
                    }

                    public synchronized boolean isConnected() {
                        return (nc != null && !nc.isClosed());
                    }

                    public void shutdown() {
                        shutdown.set(true);
                    }
                }

                List<NATSClient> tasks = new ArrayList<NATSClient>(numClients);
                for (int i = 0; i < numClients; i++) {
                    NATSClient task = new NATSClient(i);
                    tasks.add(task);
                    executor.submit(task);
                }

                Map<String, Integer> cs = new HashMap<String, Integer>();

                int numReady = 0;
                while (numReady < numClients) {
                    numReady = 0;
                    for (NATSClient cli : tasks) {
                        if (cli.isConnected()) {
                            numReady++;
                        }
                    }
                    sleep(100);
                }

                s1.shutdown();
                sleep(1000);

                int disconnected = 0;
                // wait for disconnects
                while (dch.size() > 0 && disconnected < numClients) {
                    Integer instance = -1;
                    instance = dch.poll(5, TimeUnit.SECONDS);
                    assertNotNull("timed out waiting for disconnect signal", instance);
                    disconnected++;
                }
                assertTrue(disconnected > 0);

                int reconnected = 0;
                // wait for reconnects
                for (int i = 0; i < disconnected; i++) {
                    String url = null;
                    while (rch.size() == 0) {
                        sleep(50);
                    }
                    url = rch.poll(5, TimeUnit.SECONDS);
                    assertNotNull("timed out waiting for reconnect signal", url);
                    reconnected++;
                    Integer count = cs.get(url);
                    if (count != null) {
                        cs.put(url, ++count);
                    } else {
                        cs.put(url, 1);
                    }
                }

                for (NATSClient client : tasks) {
                    client.shutdown();
                }
                executor.shutdownNow();
                assertTrue(executor.awaitTermination(2, TimeUnit.SECONDS));

                assertEquals(disconnected, reconnected);

                int numServers = 2;

                assertEquals(numServers, cs.size());

                int expected = numClients / numServers;
                // We expect a 40 percent variance
                int var = (int) ((float) expected * 0.40);

                int delta = Math.abs(cs.get(testServers[2]) - cs.get(testServers[4]));
                // System.err.printf("var = %d, delta = %d\n", var, delta);
                if (delta > var) {
                    String str = String.format("Connected clients to servers out of range: %d/%d", delta, var);
                    fail(str);
                }
            }
        }
    }
}

From source file: org.apache.hadoop.raid.IAEncoder.java

protected void encodeFileToStream(FileSystem fs, Path srcFile, long srcSize, long blockSize, OutputStream out,
        Progressable reporter) throws IOException {
    // (disable) One parity block can be written directly to out, rest to local files.
    //tmpOuts[0] = out;

    //File[] tmpFiles = new File[paritySize];

    /*
     * signal queue to trigger output
     * No need for a blocking queue (adjust in the future)
     */
    BlockingQueue<byte[]> closedBuf = new ArrayBlockingQueue<byte[]>(14);

    /*
     * Output thread
     */
    DataSender ds = new DataSender(closedBuf, out, blockSize, srcSize);
    Thread dst = new Thread(ds);
    dst.start();

    // Loop over stripes in the file.
    for (long stripeStart = 0; stripeStart < srcSize; stripeStart += blockSize * stripeSize) {
        reporter.progress();

        //LOG.info("Starting encoding of stripe " + srcFile + ":" + stripeStart);

        byte[][] bufs = new byte[paritySize][];
        /*
         * create temp file to write parity block (one file for each block)
         */
        //LOG.info("allocating memory mcount: "+mcount);
        for (int i = 0; i < paritySize; i++) {
            //tmpFiles[i] = File.createTempFile("parity", "_" + i); 
            //LOG.info("Created tmp file " + tmpFiles[i]);
            //tmpFiles[i].deleteOnExit();
            //LOG.info("allocating memory index: "+i);
            bufs[i] = new byte[(int) blockSize];
            mcount++;
        }
        //LOG.info("allocated memory");

        // Create input streams for blocks in the stripe.
        InputStream[] blocks = stripeInputs(fs, srcFile, stripeStart, srcSize, blockSize);

        //LOG.info("created InputStream");
        /*
         * encode data
         */
        encodeStripe(blocks, stripeStart, blockSize, bufs, reporter);

        /*
         * trigger output
         */

        //LOG.info("encoded stripe");
        for (int i = 0; i < paritySize; i++) {
            try {
                closedBuf.put(bufs[i]);
            } catch (InterruptedException e) {
            }
            reporter.progress();
        }
        //LOG.info("push closed buf");
    }

    try {
        //waiting for the end of output
        dst.join();
        LOG.info("dst joined");
    } catch (InterruptedException e) {
        LOG.info("thread join interrupted");
    }
}

From source file: com.xorcode.andtweet.AndTweetService.java

private int restoreQueue(BlockingQueue<CommandData> q, String prefsFileName) {
    Context context = MyPreferences.getContext();
    int count = 0;
    if (SharedPreferencesUtil.exists(context, prefsFileName)) {
        boolean done = false;
        SharedPreferences sp = MyPreferences.getSharedPreferences(prefsFileName, MODE_PRIVATE);
        do {
            CommandData cd = new CommandData(sp, count);
            if (cd.command == CommandEnum.UNKNOWN) {
                done = true;
            } else {
                try {
                    q.put(cd);
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
                MyLog.v(TAG, "Command restored: " + cd.toString());
                count += 1;
            }
        } while (!done);
        sp = null;
        // Delete this saved queue
        SharedPreferencesUtil.delete(context, prefsFileName);
        MyLog.d(TAG, "Queue restored from " + prefsFileName + ", " + count + " msgs");
    }
    return count;
}

From source file: edu.cmu.cs.lti.ark.fn.Semafor.java

/**
 * Reads conll sentences, parses them, and writes the json-serialized results.
 *
 * @param inputSupplier where to read conll sentences from
 * @param outputSupplier where to write the results to
 * @param numThreads the number of threads to use
 * @throws IOException
 * @throws InterruptedException
 */
public void runParser(final InputSupplier<? extends Readable> inputSupplier,
        final OutputSupplier<? extends Writer> outputSupplier, final int numThreads)
        throws IOException, InterruptedException {
    // use the producer-worker-consumer pattern to parse all sentences in multiple threads, while keeping
    // output in order.
    final BlockingQueue<Future<Optional<SemaforParseResult>>> results = Queues
            .newLinkedBlockingDeque(5 * numThreads);
    final ExecutorService workerThreadPool = newFixedThreadPool(numThreads);
    // try to shutdown gracefully. don't worry too much if it doesn't work
    Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                workerThreadPool.shutdown();
                workerThreadPool.awaitTermination(5, TimeUnit.SECONDS);
            } catch (InterruptedException ignored) {
            }
        }
    }));

    final PrintWriter output = new PrintWriter(outputSupplier.getOutput());
    try {
        // Start thread to fetch computed results and write to file
        final Thread consumer = new Thread(new Runnable() {
            @Override
            public void run() {
                while (!Thread.currentThread().isInterrupted()) {
                    try {
                        final Optional<SemaforParseResult> oResult = results.take().get();
                        if (!oResult.isPresent())
                            break; // got poison pill. we're done
                        output.println(oResult.get().toJson());
                        output.flush();
                    } catch (Exception e) {
                        e.printStackTrace();
                        throw new RuntimeException(e);
                    }
                }
            }
        });
        consumer.start();
        // in main thread, put placeholders on results queue (so results stay in order), then
        // tell a worker thread to fill up the placeholder
        final SentenceCodec.SentenceIterator sentences = ConllCodec.readInput(inputSupplier.getInput());
        try {
            int i = 0;
            while (sentences.hasNext()) {
                final Sentence sentence = sentences.next();
                final int sentenceId = i;
                results.put(workerThreadPool.submit(new Callable<Optional<SemaforParseResult>>() {
                    @Override
                    public Optional<SemaforParseResult> call() throws Exception {
                        final long start = System.currentTimeMillis();
                        try {
                            final SemaforParseResult result = parseSentence(sentence);
                            final long end = System.currentTimeMillis();
                            System.err.printf("parsed sentence %d in %d millis.%n", sentenceId, end - start);
                            return Optional.of(result);
                        } catch (Exception e) {
                            e.printStackTrace();
                            throw e;
                        }
                    }
                }));
                i++;
            }
            // put a poison pill on the queue to signal that we're done
            results.put(workerThreadPool.submit(new Callable<Optional<SemaforParseResult>>() {
                @Override
                public Optional<SemaforParseResult> call() throws Exception {
                    return Optional.absent();
                }
            }));
            workerThreadPool.shutdown();
        } finally {
            closeQuietly(sentences);
        }
        // wait for consumer to finish
        consumer.join();
    } finally {
        closeQuietly(output);
    }
    System.err.println("Done.");
}

From source file: net.yacy.http.servlets.YaCyDefaultServlet.java

/**
 * TODO: add same functionality & checks as in HTTPDemon.parseMultipart
 *
 * parse multi-part form data for formfields, see also original
 * implementation in HTTPDemon.parseMultipart
 *
 * For file data the parameter for the formfield contains the filename and an
 * additional parameter with the suffix [fieldname]$file contains the upload content
 * (e.g. <input type="file" name="upload">  upload="local/filename" upload$file=[content])
 *
 * @param request
 * @param args found fields/values are added to the map
 */
protected void parseMultipart(final HttpServletRequest request, final serverObjects args) throws IOException {

    // reject too large uploads
    if (request.getContentLength() > SIZE_FILE_THRESHOLD)
        throw new IOException("FileUploadException: uploaded file too large = " + request.getContentLength());

    // check if we have enough memory
    if (!MemoryControl.request(request.getContentLength() * 3, false)) {
        throw new IOException("not enough memory available for request. request.getContentLength() = "
                + request.getContentLength() + ", MemoryControl.available() = " + MemoryControl.available());
    }
    ServletFileUpload upload = new ServletFileUpload(DISK_FILE_ITEM_FACTORY);
    upload.setFileSizeMax(SIZE_FILE_THRESHOLD);
    try {
        // Parse the request to get form field items
        List<FileItem> fileItems = upload.parseRequest(request);
        // Process the uploaded file items
        Iterator<FileItem> i = fileItems.iterator();
        final BlockingQueue<Map.Entry<String, byte[]>> files = new LinkedBlockingQueue<>();
        while (i.hasNext()) {
            FileItem item = i.next();
            if (item.isFormField()) {
                // simple text
                if (item.getContentType() == null || !item.getContentType().contains("charset")) {
                    // old yacy clients use their local default charset, on most systems UTF-8 (I hope ;)
                    args.add(item.getFieldName(), item.getString(StandardCharsets.UTF_8.name()));
                } else {
                    // use default encoding (given as header or ISO-8859-1)
                    args.add(item.getFieldName(), item.getString());
                }
            } else {
                // read file upload
                args.add(item.getFieldName(), item.getName()); // add the filename to the parameters
                InputStream filecontent = null;
                try {
                    filecontent = item.getInputStream();
                    files.put(new AbstractMap.SimpleEntry<String, byte[]>(item.getFieldName(),
                            FileUtils.read(filecontent)));
                } catch (IOException e) {
                    ConcurrentLog.info("FILEHANDLER", e.getMessage());
                } finally {
                    if (filecontent != null)
                        try {
                            filecontent.close();
                        } catch (IOException e) {
                            ConcurrentLog.info("FILEHANDLER", e.getMessage());
                        }
                }
            }
        }
        if (files.size() <= 1) { // TODO: should include additional checks to limit parameter.size below the relatively large SIZE_FILE_THRESHOLD
            for (Map.Entry<String, byte[]> job : files) { // add the file content to parameter fieldname$file
                String n = job.getKey();
                byte[] v = job.getValue();
                String filename = args.get(n);
                if (filename != null && filename.endsWith(".gz")) {
                    // transform this value into base64
                    String b64 = Base64Order.standardCoder.encode(v);
                    args.put(n + "$file", b64);
                    args.remove(n);
                    args.put(n, filename + ".base64");
                } else {
                    args.put(n + "$file", v); // the byte[] is transformed into UTF8. You cannot push binaries here
                }
            }
        } else {
            // do this concurrently (this would all be superfluous if serverObjects could store byte[] instead of only String)
            int t = Math.min(files.size(), Runtime.getRuntime().availableProcessors());
            final Map.Entry<String, byte[]> POISON = new AbstractMap.SimpleEntry<>(null, null);
            Thread[] p = new Thread[t];
            for (int j = 0; j < t; j++) {
                files.put(POISON);
                p[j] = new Thread("YaCyDefaultServlet.parseMultipart-" + j) {
                    @Override
                    public void run() {
                        Map.Entry<String, byte[]> job;
                        try {
                            while ((job = files.take()) != POISON) {
                                String n = job.getKey();
                                byte[] v = job.getValue();
                                String filename = args.get(n);
                                String b64 = Base64Order.standardCoder.encode(v);
                                synchronized (args) {
                                    args.put(n + "$file", b64);
                                    args.remove(n);
                                    args.put(n, filename + ".base64");
                                }
                            }
                        } catch (InterruptedException e) {
                        }
                    }
                };
                p[j].start();
            }
            for (int j = 0; j < t; j++)
                p[j].join();
        }
    } catch (Exception ex) {
        ConcurrentLog.info("FILEHANDLER", ex.getMessage());
    }
}

From source file: com.nridge.connector.ws.con_ws.core.SiteCrawler.java

/**
 * This method is called when a page is fetched and ready
 * to be processed by your program.
 *
 * @param aPage Web page to visit.
 */
@Override
public void visit(Page aPage) {
    CrawlController crawlController = getMyController();
    AppMgr appMgr = (AppMgr) crawlController.getCustomData();

    Logger appLogger = appMgr.getLogger(this, "visit");

    appLogger.trace(appMgr.LOGMSG_TRACE_ENTER);

    String urlString = aPage.getWebURL().getURL();
    CrawlQueue crawlQueue = (CrawlQueue) appMgr.getProperty(Connector.PROPERTY_CRAWL_QUEUE);
    BlockingQueue extractQueue = (BlockingQueue) appMgr.getProperty(Connector.QUEUE_EXTRACT_NAME);
    boolean isCrawlJavaScript = appMgr.getBoolean(Constants.CFG_PROPERTY_PREFIX + ".extract.crawl_javascript",
            false);

    if ((crawlQueue == null) || (extractQueue == null))
        appLogger.error("Internal Error: Crawl/Extract queue is null.");
    else {
        StopWatch stopWatch = new StopWatch();
        stopWatch.start();

        Document wsDocument = createDocument(appMgr, crawlQueue, aPage);
        DataBag wsBag = wsDocument.getBag();
        if (aPage.getParseData() instanceof BinaryParseData)
            fetchDocument(appMgr, aPage, wsDocument);
        else if (aPage.getParseData() instanceof HtmlParseData) {
            HtmlParseData htmlParseData = (HtmlParseData) aPage.getParseData();

            if (isCrawlJavaScript)
                fetchPageUsingBrowser(appMgr, aPage, wsDocument);
            else {
                assignHTMLTags(appMgr, aPage, wsDocument);
                DataField dataField = wsBag.getFirstFieldByFeatureName(Field.FEATURE_IS_CONTENT);
                if (dataField != null) {
                    String pageText = htmlParseData.getText();
                    if (StringUtils.isNotEmpty(pageText))
                        dataField.setValue(pageText);
                }
            }
        } else {
            wsDocument = null;
            String msgStr = String.format("Unknown Parse Date Type '%s': %s", aPage.getParseData(), urlString);
            appLogger.error(msgStr);
        }

        if (wsDocument != null) {
            String docId = wsBag.getValueAsString("nsd_id");
            String queueBagPathFileName = crawlQueue.docPathFileName(Connector.QUEUE_EXTRACT_NAME, docId);
            DocumentXML documentXML = new DocumentXML(wsDocument);
            try {
                documentXML.save(queueBagPathFileName);
            } catch (IOException e) {
                wsDocument = null;
                String msgStr = String.format("%s: %s", queueBagPathFileName, e.getMessage());
                appLogger.error(msgStr);
            }

            stopWatch.stop();

            if (wsDocument != null) {
                String queueItem = Connector.queueItemIdPhaseTime(docId, Connector.PHASE_EXTRACT,
                        stopWatch.getTime());
                try {
                    // If queue is full, this thread may block.
                    extractQueue.put(queueItem);
                } catch (InterruptedException e) {
                    // Restore the interrupted status so parent can handle (if it wants to).
                    Thread.currentThread().interrupt();
                }
            }
        }
    }

    appLogger.trace(appMgr.LOGMSG_TRACE_DEPART);
}

From source file: net.yacy.document.importer.MediawikiImporter.java

@Override
public void run() {
    this.start = System.currentTimeMillis();
    final int threads = Math.max(2, Runtime.getRuntime().availableProcessors() - 1);
    // out keeps an output file open until poisoned, to make sure the underlying thread gets the end condition
    // regardless of any exception (e.g. out of memory); an add(poison) is added to the outermost finally block
    final BlockingQueue<wikiparserrecord> out = new ArrayBlockingQueue<wikiparserrecord>(threads * 10);
    final wikiparserrecord poison = newRecord();
    try {
        String targetstub = this.sourcefile.getName();
        int p = targetstub.lastIndexOf("\\.");
        if (p > 0)
            targetstub = targetstub.substring(0, p);
        InputStream is = new BufferedInputStream(new FileInputStream(this.sourcefile), 1024 * 1024);
        if (this.sourcefile.getName().endsWith(".bz2")) {
            is = new BZip2CompressorInputStream(is);
        } else if (this.sourcefile.getName().endsWith(".gz")) {
            is = new GZIPInputStream(is);
        }
        final BufferedReader r = new BufferedReader(new java.io.InputStreamReader(is, StandardCharsets.UTF_8),
                4 * 1024 * 1024);
        String t;
        StringBuilder sb = new StringBuilder();
        boolean page = false, text = false;
        String title = null;
        final BlockingQueue<wikiparserrecord> in = new ArrayBlockingQueue<wikiparserrecord>(threads * 10);
        final ExecutorService service = Executors.newCachedThreadPool(
                new NamePrefixThreadFactory(MediawikiImporter.class.getSimpleName() + ".convertConsumer"));
        final convertConsumer[] consumers = new convertConsumer[threads];
        final Future<?>[] consumerResults = (Future<?>[]) Array.newInstance(Future.class, threads);
        for (int i = 0; i < threads; i++) {
            consumers[i] = new convertConsumer(in, out, poison);
            consumerResults[i] = service.submit(consumers[i]);
        }
        final convertWriter writer = new convertWriter(out, poison, this.targetdir, targetstub);
        final Future<Integer> writerResult = service.submit(writer);

        wikiparserrecord record;
        int q;
        while ((t = r.readLine()) != null) {
            if ((p = t.indexOf("<base>", 0)) >= 0 && (q = t.indexOf("</base>", p)) > 0) {
                //urlStub = "http://" + lang + ".wikipedia.org/wiki/";
                this.urlStub = t.substring(p + 6, q);
                if (!this.urlStub.endsWith("/")) {
                    q = this.urlStub.lastIndexOf('/');
                    if (q > 0)
                        this.urlStub = this.urlStub.substring(0, q + 1);
                }
                final DigestURL uri = new DigestURL(this.urlStub);
                this.hostport = uri.getHost();
                if (uri.getPort() != 80)
                    this.hostport += ":" + uri.getPort();
                continue;
            }
            if (t.indexOf(pagestart) >= 0) {
                page = true;
                continue;
            }
            if ((p = t.indexOf(textstart)) >= 0) {
                text = page;
                q = t.indexOf('>', p + textstart.length());
                if (q > 0) {
                    final int u = t.indexOf(textend, q + 1);
                    if (u > q) {
                        sb.append(t.substring(q + 1, u));
                        ConcurrentLog.info("WIKITRANSLATION", "[INJECT] Title: " + title);
                        if (sb.length() == 0) {
                            ConcurrentLog.info("WIKITRANSLATION", "ERROR: " + title + " has empty content");
                            continue;
                        }
                        record = newRecord(this.hostport, this.urlStub, title, sb);
                        try {
                            in.put(record);
                            this.count++;
                        } catch (final InterruptedException e1) {
                            ConcurrentLog.logException(e1);
                        }
                        sb = new StringBuilder(200);
                        continue;
                    }
                    sb.append(t.substring(q + 1));
                }
                continue;
            }
            if (t.indexOf(textend) >= 0) {
                text = false;
                ConcurrentLog.info("WIKITRANSLATION", "[INJECT] Title: " + title);
                if (sb.length() == 0) {
                    ConcurrentLog.info("WIKITRANSLATION", "ERROR: " + title + " has empty content");
                    continue;
                }
                record = newRecord(this.hostport, this.urlStub, title, sb);
                try {
                    in.put(record);
                    this.count++;
                } catch (final InterruptedException e1) {
                    ConcurrentLog.logException(e1);
                }
                sb = new StringBuilder(200);
                continue;
            }
            if (t.indexOf(pageend) >= 0) {
                page = false;
                continue;
            }
            if ((p = t.indexOf("<title>", 0)) >= 0) {
                title = t.substring(p + 7);
                q = title.indexOf("</title>", 0);
                if (q >= 0)
                    title = title.substring(0, q);
                continue;
            }
            if (text) {
                sb.append(t);
                sb.append('\n');
            }
        }
        r.close();

        try {
            for (int i = 0; i < threads; i++) {
                in.put(poison);
            }
            for (int i = 0; i < threads; i++) {
                consumerResults[i].get(10000, TimeUnit.MILLISECONDS);
            }
        } catch (final InterruptedException e) {
            ConcurrentLog.logException(e);
        } catch (final ExecutionException e) {
            ConcurrentLog.logException(e);
        } catch (final TimeoutException e) {
            ConcurrentLog.logException(e);
        } catch (final Exception e) {
            ConcurrentLog.logException(e);
        } finally {
            out.put(poison); // output thread condition (for file.close)
            writerResult.get(10000, TimeUnit.MILLISECONDS);
        }
    } catch (final IOException e) {
        ConcurrentLog.logException(e);
    } catch (final Exception e) {
        ConcurrentLog.logException(e);
    } finally {
        try {
            out.put(poison); // out keeps the output file open until poisoned, to close the file if an exception happened in this block
        } catch (InterruptedException ex) {
        }
    }
}

From source file: eu.project.ttc.tools.TermSuitePipeline.java

public DocumentStream stream(CasConsumer consumer) {
    try {
        String id = new BigInteger(130, new SecureRandom()).toString(8);
        String casConsumerName = "pipeline-" + id + "-consumer";
        ConsumerRegistry.getInstance().registerConsumer(casConsumerName, consumer);
        String queueName = "pipeline-" + id + "-queue";
        final BlockingQueue<CollectionDocument> q = QueueRegistry.getInstance().registerQueue(queueName, 10);

        /*
         * 1- Creates the streaming collection reader desc
         */
        this.crDescription = CollectionReaderFactory.createReaderDescription(StreamingCollectionReader.class,
                StreamingCollectionReader.PARAM_LANGUAGE, this.lang.getCode(),
                StreamingCollectionReader.PARAM_NAME, queueName, StreamingCollectionReader.PARAM_QUEUE_NAME,
                queueName);

        /*
         * 2- Aggregate the consumer AE
         */
        AnalysisEngineDescription consumerAE = AnalysisEngineFactory.createEngineDescription(
                StreamingCasConsumer.class, StreamingCasConsumer.PARAM_CONSUMER_NAME, casConsumerName);
        this.aggregateBuilder.add(consumerAE);

        /*
         * 3- Starts the pipeline in a separate Thread 
         */
        this.streamThread = new Thread() {
            @Override
            public void run() {
                runPipeline();
            }
        };
        this.streamThread.start();

        /*
         * 4- Bind user inputs to the queue
         */
        documentProvider = new DocumentProvider() {
            @Override
            public void provide(CollectionDocument doc) {
                try {
                    q.put(doc);
                } catch (InterruptedException e) {
                    LOGGER.warn("Interrupted while there were more documents waiting.");
                }
            }
        };
        return new DocumentStream(streamThread, documentProvider, consumer, queueName);
    } catch (Exception e) {
        throw new TermSuitePipelineException(e);
    }
}

From source file: org.micromanager.asidispim.AcquisitionPanel.java

/**
 * The basic method for adding images to an existing data set. If the
 * acquisition was not previously initialized, it will attempt to initialize
 * it from the available image data. This version uses a blocking queue and is 
 * much faster than the one currently implemented in the ScriptInterface.
 * Eventually, this function should be replaced by the ScriptInterface version
 * of the same.
 * @param acq - MMAcquisition object to use (old way used acquisition name and then
 *  had to call a deprecated function on every call; now just pass the acquisition object)
 * @param frame - frame nr at which to insert the image
 * @param channel - channel at which to insert image
 * @param slice - (z) slice at which to insert image
 * @param position - position at which to insert image
 * @param ms - Time stamp to be added to the image metadata
 * @param taggedImg - image + metadata to be added
 * @param bq - Blocking queue to which the image should be added.  This queue
 * should be hooked up to the ImageCache belonging to this acquisition
 * @throws java.lang.InterruptedException
 * @throws org.micromanager.utils.MMScriptException
 */
private void addImageToAcquisition(MMAcquisition acq, int frame, int channel, int slice, int position, long ms,
        TaggedImage taggedImg, BlockingQueue<TaggedImage> bq) throws MMScriptException, InterruptedException {

    // verify position number is allowed 
    if (acq.getPositions() <= position) {
        throw new MMScriptException("The position number must not exceed declared" + " number of positions ("
                + acq.getPositions() + ")");
    }

    // verify that channel number is allowed 
    if (acq.getChannels() <= channel) {
        throw new MMScriptException("The channel number must not exceed declared" + " number of channels ("
                + acq.getChannels() + ")");
    }

    JSONObject tags = taggedImg.tags;

    if (!acq.isInitialized()) {
        throw new MMScriptException("Error in the ASIdiSPIM logic.  Acquisition should have been initialized");
    }

    // create required coordinate tags
    try {
        MDUtils.setFrameIndex(tags, frame);
        tags.put(MMTags.Image.FRAME, frame);
        MDUtils.setChannelIndex(tags, channel);
        MDUtils.setChannelName(tags, channelNames_[channel]);
        MDUtils.setSliceIndex(tags, slice);
        MDUtils.setPositionIndex(tags, position);
        MDUtils.setElapsedTimeMs(tags, ms);
        MDUtils.setImageTime(tags, MDUtils.getCurrentTime());
        MDUtils.setZStepUm(tags, zStepUm_);

        // save cached positions of SPIM head for this stack
        tags.put("SPIM_Position_X", xPositionUm_); // TODO consider computing accurate X position per slice for stage scanning data
        tags.put("SPIM_Position_Y", yPositionUm_);
        tags.put("SPIM_Position_Z", zPositionUm_); // NB this is SPIM head position, not position in stack

        if (!tags.has(MMTags.Summary.SLICES_FIRST) && !tags.has(MMTags.Summary.TIME_FIRST)) {
            // add default setting
            tags.put(MMTags.Summary.SLICES_FIRST, true);
            tags.put(MMTags.Summary.TIME_FIRST, false);
        }

        if (acq.getPositions() > 1) {
            // if no position name is defined we need to insert a default one
            if (tags.has(MMTags.Image.POS_NAME)) {
                tags.put(MMTags.Image.POS_NAME, "Pos" + position);
            }
        }

        // update frames if necessary
        if (acq.getFrames() <= frame) {
            acq.setProperty(MMTags.Summary.FRAMES, Integer.toString(frame + 1));
        }

    } catch (JSONException e) {
        throw new MMScriptException(e);
    }

    bq.put(taggedImg);
}