Example usage for the java.util.concurrent.LinkedBlockingQueue constructor LinkedBlockingQueue()

Introduction

On this page you can find example usage for the java.util.concurrent.LinkedBlockingQueue no-argument constructor, LinkedBlockingQueue().

Prototype

public LinkedBlockingQueue() 

Document

Creates a LinkedBlockingQueue with a capacity of Integer.MAX_VALUE.
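
Before the project examples below, here is a minimal, self-contained sketch (class and variable names are illustrative, not from any of the projects) of what this constructor gives you: an effectively unbounded queue, where offer() never fails for lack of capacity and take() blocks until an element is available.

import java.util.concurrent.LinkedBlockingQueue;

public class UnboundedQueueDemo {
    public static void main(String[] args) throws InterruptedException {
        // No-arg constructor: capacity is Integer.MAX_VALUE, so offer() always succeeds here.
        LinkedBlockingQueue<String> queue = new LinkedBlockingQueue<String>();
        boolean accepted = queue.offer("first"); // true; the queue is never full in practice
        queue.put("second");                     // put() would only block if the queue were full

        System.out.println("offer accepted: " + accepted);
        System.out.println(queue.take()); // take() blocks until an element is available
        System.out.println(queue.take());
    }
}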

Usage

From source file:eu.stratosphere.nephele.services.iomanager.IOManager.java

/**
 * Creates a block channel reader that reads blocks from the given channel. The reader reads asynchronously,
 * such that a read request is accepted, carried out at some (close) point in time, and the full segment
 * is pushed to the reader's return queue.
 * <p>
 * The reader will collect a specified number of read requests and carry them out
 * in one, effectively reading one block in the size of multiple memory pages.
 * Note that this means that no memory segment will reach the return queue before
 * the given number of requests are collected, so the number of buffers used with
 * the reader should be greater than the number of requests to combine. Ideally,
 * the number of memory segments used is a multiple of the number of requests to
 * combine.
 * 
 * @param channelID The descriptor for the channel to read from.
 * @param numRequestsToCombine The number of read requests to combine into one I/O request.
 * @return A block channel reader that reads from the given channel.
 * @throws IOException Thrown, if the channel for the reader could not be opened.
 */
public BlockChannelReader createBlockChannelReader(Channel.ID channelID, int numRequestsToCombine)
        throws IOException {
    if (this.isClosed) {
        throw new IllegalStateException("I/O-Manager is closed.");
    }

    return new BlockChannelReader(channelID, this.readers[channelID.getThreadNum()].requestQueue,
            new LinkedBlockingQueue<MemorySegment>(), numRequestsToCombine);
}
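
The return-queue hand-off used above is a general pattern rather than anything Stratosphere-specific. The sketch below reproduces it with plain byte[] blocks standing in for MemorySegment and an ordinary thread standing in for the asynchronous I/O thread; all names are illustrative assumptions.

import java.util.concurrent.LinkedBlockingQueue;

public class ReturnQueueSketch {
    public static void main(String[] args) throws InterruptedException {
        // The I/O thread "reads" blocks and pushes each completed block onto the return queue.
        final LinkedBlockingQueue<byte[]> returnQueue = new LinkedBlockingQueue<byte[]>();

        Thread ioThread = new Thread(new Runnable() {
            public void run() {
                for (int i = 0; i < 3; i++) {
                    returnQueue.add(new byte[4096]); // a completed block segment
                }
            }
        });
        ioThread.start();

        // The consumer blocks until each segment reaches the return queue.
        for (int i = 0; i < 3; i++) {
            byte[] segment = returnQueue.take();
            System.out.println("received block of " + segment.length + " bytes");
        }
        ioThread.join();
    }
}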

From source file:com.navercorp.pinpoint.web.calltree.span.CallTreeIteratorTest.java

private Queue<Integer> parseExpected(String expectedValues) {
    if (expectedValues == null) {
        return null;
    }

    String[] tokens = expectedValues.split(",");
    Queue<Integer> expected = new LinkedBlockingQueue<Integer>();
    for (String token : tokens) {
        expected.add(Integer.parseInt(token.trim()));
    }

    return expected;
}
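
A hypothetical call (using JUnit's assertEquals) shows the intent: the queue preserves the order of the comma-separated tokens, so the expected values can be consumed one at a time with poll().

// Illustrative only: insertion order of the parsed tokens is preserved.
Queue<Integer> expected = parseExpected("1, 2, 3");
assertEquals(Integer.valueOf(1), expected.poll());
assertEquals(Integer.valueOf(2), expected.poll());
assertEquals(Integer.valueOf(3), expected.poll());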

From source file:de.tu_dortmund.ub.data.dswarm.TaskProcessingUnit.java

private static void executeIngests(final String[] files, final String dataModelID, final String resourceID,
        final String projectName, final String serviceName, final Integer engineThreads,
        final Properties config) throws Exception {

    // create job list
    final LinkedList<Callable<String>> filesToPush = new LinkedList<>();

    int cnt = 0;
    for (final String file : files) {

        cnt++;
        filesToPush.add(new Ingest(config, file, dataModelID, resourceID, projectName, cnt));
    }

    // work on jobs
    final ThreadPoolExecutor pool = new ThreadPoolExecutor(engineThreads, engineThreads, 0L, TimeUnit.SECONDS,
            new LinkedBlockingQueue<>());

    execute(serviceName, filesToPush, pool);
}
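
A side note on this construction: a pool whose core and maximum sizes are equal, draining an unbounded LinkedBlockingQueue, is effectively what Executors.newFixedThreadPool(n) builds internally (the JDK uses a 0-millisecond keep-alive, which is equivalent here, since a fixed-size pool never times threads out). The pool above could therefore be created more compactly, at the cost of receiving an ExecutorService rather than a ThreadPoolExecutor:

// java.util.concurrent.Executors equivalent of the pool above; with an
// unbounded work queue, the maximum pool size never comes into play.
final ExecutorService pool = Executors.newFixedThreadPool(engineThreads);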

From source file:de.tu_dortmund.ub.data.dswarm.TaskProcessingUnit.java

private static void executeTransform(final String inputDataModelID, final String outputDataModelID,
        final Optional<Boolean> optionalDoIngestOnTheFly, final Optional<Boolean> optionalDoExportOnTheFly,
        final Optional<String> optionalExportMimeType, final Optional<String> optionalExportFileExtension,
        final Integer engineThreads, final String serviceName, final Properties config) throws Exception {

    // create job list
    final LinkedList<Callable<String>> transforms = new LinkedList<>();
    transforms.add(new Transform(config, inputDataModelID, outputDataModelID, optionalDoIngestOnTheFly,
            optionalDoExportOnTheFly, optionalExportMimeType, optionalExportFileExtension, 0));

    // work on jobs
    final ThreadPoolExecutor pool = new ThreadPoolExecutor(engineThreads, engineThreads, 0L, TimeUnit.SECONDS,
            new LinkedBlockingQueue<>());

    execute(serviceName, transforms, pool);
}

From source file:de.tu_dortmund.ub.data.dswarm.TaskProcessingUnit.java

private static void executeExport(final String exportDataModelID, final Optional<String> optionalExportMimeType,
        final Optional<String> optionalExportFileExtension, final Integer engineThreads,
        final String serviceName, final Properties config) throws Exception {

    // create job list
    final LinkedList<Callable<String>> exports = new LinkedList<>();
    exports.add(new Export(exportDataModelID, optionalExportMimeType, optionalExportFileExtension, config));

    // work on jobs
    final ThreadPoolExecutor pool = new ThreadPoolExecutor(engineThreads, engineThreads, 0L, TimeUnit.SECONDS,
            new LinkedBlockingQueue<>());

    execute(serviceName, exports, pool);
}

From source file:eu.stratosphere.pact.runtime.hash.MutableHashTable.java

protected boolean prepareNextPartition() throws IOException {
    // finalize and cleanup the partitions of the current table
    int buffersAvailable = 0;
    for (int i = 0; i < this.partitionsBeingBuilt.size(); i++) {
        final HashPartition<BT, PT> p = this.partitionsBeingBuilt.get(i);
        p.setFurtherPatitioning(this.furtherPartitioning);
        buffersAvailable += p.finalizeProbePhase(this.availableMemory, this.partitionsPending);
    }

    this.partitionsBeingBuilt.clear();
    this.writeBehindBuffersAvailable += buffersAvailable;

    releaseTable();

    if (this.currentSpilledProbeSide != null) {
        this.currentSpilledProbeSide.closeAndDelete();
        this.currentSpilledProbeSide = null;
    }

    // check if there are pending partitions
    if (!this.partitionsPending.isEmpty()) {
        final HashPartition<BT, PT> p = this.partitionsPending.get(0);

        // build the next table
        buildTableFromSpilledPartition(p);

        // set the probe side - gather memory segments for reading
        LinkedBlockingQueue<MemorySegment> returnQueue = new LinkedBlockingQueue<MemorySegment>();
        this.currentSpilledProbeSide = this.ioManager
                .createBlockChannelReader(p.getProbeSideChannel().getChannelID(), returnQueue);

        List<MemorySegment> memory = new ArrayList<MemorySegment>();
        memory.add(getNextBuffer());
        memory.add(getNextBuffer());

        ChannelReaderInputViewIterator<PT> probeReader = new ChannelReaderInputViewIterator<PT>(
                this.currentSpilledProbeSide, returnQueue, memory, this.availableMemory,
                this.probeSideSerializer, p.getProbeSideBlockCount());
        this.probeIterator.set(probeReader);

        // unregister the pending partition
        this.partitionsPending.remove(0);
        this.currentRecursionDepth = p.getRecursionLevel() + 1;

        // recursively get the next
        return nextRecord();
    } else {
        // no more data
        return false;
    }
}

From source file:com.emc.vipr.sync.ViPRSync.java

public void run() {
    // Some validation (must have source and target)
    Assert.notNull(source, "source plugin must be specified");
    Assert.notNull(target, "target plugin must be specified");

    // set log level before we do anything else
    if (logLevel != null) {
        switch (logLevel) {
        case DEBUG_OPTION:
            LogManager.getRootLogger().setLevel(Level.DEBUG);
            break;
        case VERBOSE_OPTION:
            LogManager.getRootLogger().setLevel(Level.INFO);
            break;
        case QUIET_OPTION:
            LogManager.getRootLogger().setLevel(Level.WARN);
            break;
        case SILENT_OPTION:
            LogManager.getRootLogger().setLevel(Level.FATAL);
            break;
        }
    }

    // filters are now fixed
    filters = Collections.unmodifiableList(filters);

    // Ask each plugin to validate the chain (resolves incompatible plugins)
    source.configure(source, filters.iterator(), target);
    target.configure(source, filters.iterator(), target);
    for (SyncFilter filter : filters) {
        filter.configure(source, filters.iterator(), target);
    }

    // Build the plugin chain
    Iterator<SyncFilter> i = filters.iterator();
    SyncFilter next, previous = null;
    while (i.hasNext()) {
        next = i.next();
        if (previous != null)
            previous.setNext(next);
        previous = next;
    }

    // add target to chain
    if (previous != null)
        previous.setNext(target);

    firstFilter = filters.isEmpty() ? target : filters.get(0);

    // register for timings
    if (timingsEnabled)
        TimingUtil.register(this, timingWindow);

    // create thread pools
    queryExecutor = new CountingExecutor(queryThreadCount, queryThreadCount, 15, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>());
    syncExecutor = new CountingExecutor(syncThreadCount, syncThreadCount, 15, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(syncThreadCount * 100));

    running = true;
    completedCount = 0;
    failedCount = 0;
    byteCount = 0;
    failedObjects = new HashSet<>();
    long lastCompletedCount = 0, lastFailedCount = 0, lastByteCount = 0;
    long intervalStart = startTime = System.currentTimeMillis();

    LogMF.info(l4j, "syncing from {0} to {1}", source.getSourceUri(), target.getTargetUri());

    // iterate through objects provided by source and submit tasks for syncing. these objects may have children,
    // in which case they will be submitted for crawling *after* they are synced.
    submitForSync(source);

    // now we must wait until all submitted tasks are complete
    while (running) {
        long now = System.currentTimeMillis();
        long interval = now - intervalStart;
        if (interval > 60000) { // dump stats every minute
            long completedInc = completedCount - lastCompletedCount;
            long failedInc = failedCount - lastFailedCount;
            long byteInc = byteCount - lastByteCount;
            LogMF.debug(l4j, "remaining tasks: {0}, active syncs: {1}, active queries: {2}",
                    syncExecutor.getRemainingTasks(), syncExecutor.getActiveCount(),
                    queryExecutor.getActiveCount());
            LogMF.info(l4j,
                    "since last report:\ncompleted: {0} ({1}/s), failed: {2}, bytes transferred: {3} ({4}/s)",
                    new Object[] { completedInc, completedInc * 1000 / interval, failedInc, byteInc,
                            byteInc * 1000 / interval });
            intervalStart = now;
            lastCompletedCount = completedCount;
            lastFailedCount = failedCount;
            lastByteCount = byteCount;
        }
        if (queryExecutor.getRemainingTasks() <= 0 && syncExecutor.getRemainingTasks() <= 0) {
            // done
            l4j.info("all tasks complete; shutting down");
            queryExecutor.shutdown();
            syncExecutor.shutdown();
            break;
        } else {
            try {
                Thread.sleep(1000);
            } catch (InterruptedException e) {
                l4j.warn("interrupted while sleeping", e);
            }
        }
    }

    // run a final timing log
    TimingUtil.logTimings(source);

    if (!running) { // terminated early
        l4j.warn("terminated early; forcing shutdown of thread pools");
        queryExecutor.shutdownNow();
        syncExecutor.shutdownNow();
    }
    running = false;

    // clean up any resources in the plugins
    cleanup();
}
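
One design detail worth noting: the query executor above sits on an unbounded queue, while the sync executor bounds its LinkedBlockingQueue at syncThreadCount * 100 so that queued sync work cannot grow without limit. CountingExecutor is this project's own class; with a plain ThreadPoolExecutor, a full bounded queue triggers the rejection handler (AbortPolicy by default). A common variation, sketched here as an assumption rather than as what ViPRSync does, is CallerRunsPolicy, which makes the submitting thread run the task itself and thereby throttles producers:

// Bounded work queue plus CallerRunsPolicy: when the queue fills up, the
// submitting thread executes the task itself, giving natural backpressure.
int threads = 8; // illustrative value
ThreadPoolExecutor executor = new ThreadPoolExecutor(threads, threads, 15, TimeUnit.SECONDS,
        new LinkedBlockingQueue<Runnable>(threads * 100),
        new ThreadPoolExecutor.CallerRunsPolicy());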

From source file:com.ibm.crail.tools.CrailBenchmark.java

void createFile(String filename, int loop) throws Exception, InterruptedException {
    System.out.println("createFile, filename " + filename + ", loop " + loop);

    //warmup
    ConcurrentLinkedQueue<CrailBuffer> bufferQueue = new ConcurrentLinkedQueue<CrailBuffer>();
    CrailBuffer buf = fs.allocateBuffer();
    bufferQueue.add(buf);
    warmUp(filename, warmup, bufferQueue);
    fs.freeBuffer(buf);

    //benchmark
    System.out.println("starting benchmark...");
    fs.getStatistics().reset();
    LinkedBlockingQueue<String> pathQueue = new LinkedBlockingQueue<String>();
    fs.create(filename, CrailNodeType.DIRECTORY, CrailStorageClass.DEFAULT, CrailLocationClass.DEFAULT).get()
            .syncDir();
    int filecounter = 0;
    for (int i = 0; i < loop; i++) {
        String name = "" + filecounter++;
        String f = filename + "/" + name;
        pathQueue.add(f);
    }

    double ops = 0;
    long start = System.currentTimeMillis();
    while (!pathQueue.isEmpty()) {
        String path = pathQueue.poll();
        fs.create(path, CrailNodeType.DATAFILE, CrailStorageClass.DEFAULT, CrailLocationClass.DEFAULT).get()
                .syncDir();
        ops = ops + 1.0; // count each completed create so the latency computed below is meaningful
    }
    long end = System.currentTimeMillis();
    double executionTime = ((double) (end - start)) / 1000.0;
    double latency = 0.0;
    if (executionTime > 0) {
        latency = 1000000.0 * executionTime / ops;
    }

    System.out.println("execution time " + executionTime);
    System.out.println("ops " + ops);
    System.out.println("latency " + latency);

    fs.getStatistics().print("close");
}
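
Because the path queue is filled up front and drained by a single thread, the non-blocking poll() (which returns null on an empty queue) is safe here. With several consumers, the isEmpty()-then-poll() sequence would race; a minimal drain loop under that multi-threaded assumption polls and null-checks in one step:

// With concurrent consumers, poll() may return null once the queue drains,
// so poll and check for null in a single step instead of calling isEmpty().
String path;
while ((path = pathQueue.poll()) != null) {
    // create the file for this path
}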

From source file:au.org.ala.layers.dao.LayerIntersectDAOImpl.java

ArrayList<String> localSampling(IntersectionFile[] intersectionFiles, double[][] points,
        IntersectCallback callback) {
    logger.info("begin LOCAL sampling, number of threads " + intersectConfig.getThreadCount()
            + ", number of layers=" + intersectionFiles.length + ", number of coordinates=" + points.length);
    long start = System.currentTimeMillis();
    int threadCount = intersectConfig.getThreadCount();
    SamplingThread[] threads = new SamplingThread[threadCount];
    LinkedBlockingQueue<Integer> lbq = new LinkedBlockingQueue<Integer>();
    CountDownLatch cdl = new CountDownLatch(intersectionFiles.length);
    ArrayList<String> output = new ArrayList<String>();
    for (int i = 0; i < intersectionFiles.length; i++) {
        output.add("");
        lbq.add(i);
    }

    callback.setLayersToSample(intersectionFiles);
    logger.info("Initialising sampling threads: " + threadCount);
    for (int i = 0; i < threadCount; i++) {
        threads[i] = new SamplingThread(lbq, cdl, intersectionFiles, points, output,
                intersectConfig.getThreadCount(), intersectConfig.getShapeFileCache(),
                intersectConfig.getGridBufferSize(), callback);
        threads[i].start();
    }

    try {
        cdl.await();
    } catch (InterruptedException ex) {
        logger.error(ex.getMessage(), ex);
    } finally {
        for (int i = 0; i < threadCount; i++) {
            try {
                threads[i].interrupt();
            } catch (Exception e) {
                logger.error(e.getMessage(), e);
            }
        }
    }

    logger.info("End sampling, threads=" + threadCount + " layers=" + intersectionFiles.length + " in "
            + (System.currentTimeMillis() - start) + "ms");
    return output;
}
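
The shape of this method (a LinkedBlockingQueue of work-item indices, a fixed set of worker threads, and a CountDownLatch sized to the number of items) is a reusable pattern. A stripped-down, self-contained sketch, with all names illustrative:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.LinkedBlockingQueue;

public class WorkQueueSketch {
    public static void main(String[] args) throws InterruptedException {
        int items = 10;
        int threadCount = 3;
        final LinkedBlockingQueue<Integer> work = new LinkedBlockingQueue<Integer>();
        final CountDownLatch done = new CountDownLatch(items);
        for (int i = 0; i < items; i++) {
            work.add(i);
        }

        for (int t = 0; t < threadCount; t++) {
            new Thread(new Runnable() {
                public void run() {
                    Integer index;
                    // poll() returns null once the queue is drained, ending the worker.
                    while ((index = work.poll()) != null) {
                        System.out.println(Thread.currentThread().getName() + " sampled item " + index);
                        done.countDown();
                    }
                }
            }).start();
        }

        done.await(); // blocks until every item has been counted down
        System.out.println("all items sampled");
    }
}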