Example usage for the java.util.concurrent.ArrayBlockingQueue(int capacity) constructor

Introduction

On this page you can find usage examples for the java.util.concurrent.ArrayBlockingQueue(int capacity) constructor, collected from open-source projects.

Prototype

public ArrayBlockingQueue(int capacity) 

Document

Creates an ArrayBlockingQueue with the given (fixed) capacity and default access policy.

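Before the project examples, here is a minimal, self-contained sketch of the fixed-capacity constructor; the class name and the values are illustrative only:

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

public class ArrayBlockingQueueDemo {
    public static void main(String[] args) throws InterruptedException {
        // Fixed capacity of 3; the queue can never grow beyond it.
        BlockingQueue<String> queue = new ArrayBlockingQueue<>(3);

        queue.put("a");   // blocks while the queue is full
        queue.offer("b"); // returns false instead of blocking when full

        System.out.println(queue.take());              // prints "a"; blocks while empty
        System.out.println(queue.remainingCapacity()); // prints 2
    }
}
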
Usage

From source file:com.pinterest.rocksplicator.controller.DispatcherTest.java
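This test backs a two-thread ThreadPoolExecutor with an ArrayBlockingQueue<Runnable>(2) and verifies that a chain of three sleep-and-increment tasks is dispatched and completed in order.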

@Test
public void testChainedTask() throws Exception {
    TaskBase task = new SleepIncrementTask(100).andThen(new SleepIncrementTask(150))
            .andThen(new SleepIncrementTask(200)).getEntity();

    final CountDownLatch latch = new CountDownLatch(3);
    FIFOTaskQueue tq = new FIFOTaskQueue(10) {
        @Override
        public boolean finishTask(final long id, final String output) {
            latch.countDown();
            return super.finishTask(id, output);
        }

        @Override
        public long finishTaskAndEnqueueRunningTask(final long id, final String output, final TaskBase newTask,
                final String worker) {
            latch.countDown();
            return super.finishTaskAndEnqueueRunningTask(id, output, newTask, worker);
        }
    };
    tq.enqueueTask(task, Integer.toString(++nameCounter), 0);

    Semaphore idleWorkersSemaphore = new Semaphore(2);
    ThreadPoolExecutor threadPoolExecutor = new ThreadPoolExecutor(2, 2, 0, TimeUnit.SECONDS,
            new ArrayBlockingQueue<Runnable>(2));
    WorkerPool workerPool = new WorkerPool(threadPoolExecutor, idleWorkersSemaphore, tq);
    TaskDispatcher dispatcher = new TaskDispatcher(2, idleWorkersSemaphore, workerPool, tq);
    dispatcher.start();

    Assert.assertTrue(latch.await(30, TimeUnit.SECONDS));
    Assert.assertEquals(SleepIncrementTask.executionCounter.intValue(), 3);

    Assert.assertEquals(tq.getResult(0), "0");
    Assert.assertEquals(tq.getResult(1), "1");
    Assert.assertEquals(tq.getResult(2), "2");
    dispatcher.stop();
}

From source file:org.apache.cxf.systest.jaxrs.AbstractJAXRSContinuationsTest.java
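An ArrayBlockingQueue<Runnable>(10) serves as the work queue for the executor that fires five concurrent client requests at a continuation-enabled JAX-RS endpoint.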

protected void doTestContinuation(String pathSegment) throws Exception {
    final String port = getPort();
    ThreadPoolExecutor executor = new ThreadPoolExecutor(5, 5, 0, TimeUnit.SECONDS,
            new ArrayBlockingQueue<Runnable>(10));
    CountDownLatch startSignal = new CountDownLatch(1);
    CountDownLatch doneSignal = new CountDownLatch(1);
    List<BookWorker> workers = new ArrayList<>(5);
    for (int x = 1; x < 6; x++) {
        workers.add(new BookWorker("http://localhost:" + port + getBaseAddress() + pathSegment + "/" + x,
                Integer.toString(x), "CXF in Action" + x, startSignal, doneSignal));
    }
    for (BookWorker w : workers) {
        executor.execute(w);
    }

    startSignal.countDown();
    doneSignal.await(60, TimeUnit.SECONDS);
    executor.shutdownNow();
    assertEquals("Not all invocations have completed", 0, doneSignal.getCount());
    for (BookWorker w : workers) {
        w.checkError();
    }
}

From source file:org.geppetto.frontend.messaging.DefaultMessageSender.java
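Two ArrayBlockingQueue instances, each bounded by maxQueueSize, act as the work queues of single-threaded executors for message preprocessing and sending; the rejection policy decides whether the oldest message is discarded or the caller gets an error once a queue fills up.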

public void initialize(WsOutbound wsOutbound) {

    logger.info(String.format(
            "Initializing message sender - queuing: %b, compression: %b, "
                    + "discard messages if queues full: %b",
            queuingEnabled, compressionEnabled, discardMessagesIfQueueFull));

    this.wsOutbound = wsOutbound;

    if (queuingEnabled) {

        RejectedExecutionHandler rejectedExecutionHandler;

        if (discardMessagesIfQueueFull) {
            rejectedExecutionHandler = new ThreadPoolExecutor.DiscardOldestPolicy();
        } else {
            rejectedExecutionHandler = new ThreadPoolExecutor.AbortPolicy();
        }

        preprocessorQueue = new ArrayBlockingQueue<>(maxQueueSize);

        preprocessorExecutor = new PausableThreadPoolExecutor(1, 1, 30, TimeUnit.SECONDS, preprocessorQueue,
                rejectedExecutionHandler);

        preprocessorExecutor.prestartAllCoreThreads();

        senderQueue = new ArrayBlockingQueue<>(maxQueueSize);

        senderExecutor = new PausableThreadPoolExecutor(1, 1, 30, TimeUnit.SECONDS, senderQueue,
                rejectedExecutionHandler);
        senderExecutor.prestartAllCoreThreads();
    }
}

From source file:gobblin.couchbase.writer.CouchbaseWriter.java
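A one-element ArrayBlockingQueue is used as a hand-off between the asynchronous Couchbase subscriber and a hand-rolled Future: the subscriber adds the write response (or the error), and the Future's get methods take or poll it.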

@Override
public Future<WriteResponse> write(final D record, final WriteCallback callback) {
    assertRecordWritable(record);
    if (record instanceof TupleDocument) {
        ((TupleDocument) record).content().value1().retain();
    }
    Observable<D> observable = _bucket.async().upsert(record);
    if (callback == null) {
        return new WriteResponseFuture<>(
                observable.timeout(_operationTimeout, _operationTimeunit).toBlocking().toFuture(),
                _defaultWriteResponseMapper);
    } else {

        final AtomicBoolean callbackFired = new AtomicBoolean(false);
        final BlockingQueue<Pair<WriteResponse, Throwable>> writeResponseQueue = new ArrayBlockingQueue<>(1);

        final Future<WriteResponse> writeResponseFuture = new Future<WriteResponse>() {
            @Override
            public boolean cancel(boolean mayInterruptIfRunning) {
                return false;
            }

            @Override
            public boolean isCancelled() {
                return false;
            }

            @Override
            public boolean isDone() {
                return callbackFired.get();
            }

            @Override
            public WriteResponse get() throws InterruptedException, ExecutionException {
                Pair<WriteResponse, Throwable> writeResponseThrowablePair = writeResponseQueue.take();
                return getWriteResponseorThrow(writeResponseThrowablePair);
            }

            @Override
            public WriteResponse get(long timeout, TimeUnit unit)
                    throws InterruptedException, ExecutionException, TimeoutException {
                Pair<WriteResponse, Throwable> writeResponseThrowablePair = writeResponseQueue.poll(timeout,
                        unit);
                if (writeResponseThrowablePair == null) {
                    throw new TimeoutException("Timeout exceeded while waiting for future to be done");
                } else {
                    return getWriteResponseorThrow(writeResponseThrowablePair);
                }
            }
        };

        observable.timeout(_operationTimeout, _operationTimeunit).subscribe(new Subscriber<D>() {
            @Override
            public void onCompleted() {
            }

            @Override
            public void onError(Throwable e) {
                callbackFired.set(true);
                writeResponseQueue.add(new Pair<WriteResponse, Throwable>(null, e));
                callback.onFailure(e);
            }

            @Override
            public void onNext(D doc) {
                try {
                    callbackFired.set(true);
                    WriteResponse writeResponse = new GenericWriteResponse<D>(doc);
                    writeResponseQueue.add(new Pair<WriteResponse, Throwable>(writeResponse, null));
                    callback.onSuccess(writeResponse);
                } finally {
                    if (doc instanceof TupleDocument) {
                        ((TupleDocument) doc).content().value1().release();
                    }
                }
            }
        });
        return writeResponseFuture;
    }
}

From source file:ubic.gemma.loader.expression.arrayDesign.ArrayDesignProbeMapperServiceImpl.java
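Here the queue decouples probe mapping from persistence: BLAT associations are pushed onto a bounded ArrayBlockingQueue and drained by a separate loader thread, with AtomicBoolean flags signalling when each side is done.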

@Override
public void processArrayDesign(ArrayDesign arrayDesign, ProbeMapperConfig config, boolean useDB) {

    assert config != null;

    if (arrayDesign.getTechnologyType().equals(TechnologyType.NONE)) {
        throw new IllegalArgumentException(
                "Do not use this service to process platforms that do not use an probe-based technology.");
    }

    Collection<Taxon> taxa = arrayDesignService.getTaxa(arrayDesign.getId());

    Taxon taxon = arrayDesign.getPrimaryTaxon();
    if (taxa.size() > 1 && taxon == null) {
        throw new IllegalArgumentException(
                "Array design has sequence from multiple taxa and has no primary taxon set: " + arrayDesign);
    }

    GoldenPathSequenceAnalysis goldenPathDb = new GoldenPathSequenceAnalysis(taxon);

    BlockingQueue<BlatAssociation> persistingQueue = new ArrayBlockingQueue<BlatAssociation>(QUEUE_SIZE);
    AtomicBoolean generatorDone = new AtomicBoolean(false);
    AtomicBoolean loaderDone = new AtomicBoolean(false);

    load(persistingQueue, generatorDone, loaderDone);

    if (useDB) {
        log.info("Removing any old associations");
        arrayDesignService.deleteGeneProductAssociations(arrayDesign);
    }

    int count = 0;
    int hits = 0;
    log.info("Start processing " + arrayDesign.getCompositeSequences().size() + " probes ...");
    for (CompositeSequence compositeSequence : arrayDesign.getCompositeSequences()) {

        Map<String, Collection<BlatAssociation>> results = processCompositeSequence(config, taxon, goldenPathDb,
                compositeSequence);

        if (results == null)
            continue;

        for (Collection<BlatAssociation> col : results.values()) {
            for (BlatAssociation association : col) {
                if (log.isDebugEnabled())
                    log.debug(association);
            }

            if (useDB) {
                // persisting is done in a separate thread.
                persistingQueue.addAll(col);
            } else {
                printResult(compositeSequence, col);
            }
            ++hits;
        }

        if (++count % 200 == 0) {
            log.info("Processed " + count + " composite sequences" + " with blat results; " + hits
                    + " mappings found.");
        }
    }

    generatorDone.set(true);

    log.info("Waiting for loading to complete ...");
    while (!loaderDone.get()) {
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            throw new RuntimeException(e);
        }
    }

    log.info("Processed " + count + " composite sequences with blat results; " + hits + " mappings found.");
    try {
        this.deleteOldFiles(arrayDesign);
    } catch (IOException e) {
        log.error("Failed to delete all old files associated with " + arrayDesign
                + ", be sure to clean them up manually or regenerate them");
    }
    arrayDesignReportService.generateArrayDesignReport(arrayDesign.getId());

}

From source file:okuyama.imdst.util.KeyManagerValueMap.java
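This storage engine keeps the file positions of deleted records in an ArrayBlockingQueue (capacity ImdstDefine.numberOfDeletedDataPoint) so that their slots in the value file can be reused.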

/**
 * Initializes the no-memory mode, in which values are kept in the given
 * line file on disk instead of being held as objects on the heap.
 */
public void initNoMemoryModeSetting(String lineFile) {
    try {
        if (sync == null)
            sync = new Object();

        readObjectFlg = true;

        this.tmpVacuumeLineFile = lineFile + ".vacuumtmp";
        this.tmpVacuumeCopyMapDirs = new String[5];
        this.tmpVacuumeCopyMapDirs[0] = lineFile + ".cpmapdir1/";
        this.tmpVacuumeCopyMapDirs[1] = lineFile + ".cpmapdir2/";
        this.tmpVacuumeCopyMapDirs[2] = lineFile + ".cpmapdir3/";
        this.tmpVacuumeCopyMapDirs[3] = lineFile + ".cpmapdir4/";
        this.tmpVacuumeCopyMapDirs[4] = lineFile + ".cpmapdir5/";

        // Map used to store values that are too large for the fixed-length value file
        String[] overSizeDataStoreDirs = new String[1];
        for (int dirIdx = 0; dirIdx < 1; dirIdx++) {
            overSizeDataStoreDirs[dirIdx] = lineFile + "_" + dirIdx + "/";
        }

        if (this.overSizeDataStore == null)
            this.overSizeDataStore = new FileBaseDataMap(overSizeDataStoreDirs, 100000, 0.01,
                    ImdstDefine.saveDataMaxSize, ImdstDefine.dataFileWriteMaxSize * 5,
                    ImdstDefine.dataFileWriteMaxSize * 15);

        File valueFile = new File(lineFile);
        if (!valueFile.exists() || valueFile.length() < 1) {
            super.clear();
        }
        // BufferedWriter used to append records to the value file
        this.bw = new BufferedWriter(
                new OutputStreamWriter(new FileOutputStream(valueFile, true), ImdstDefine.keyWorkFileEncoding),
                1024 * 256);
        this.dataFileBufferUseCount = new AtomicInteger(0);

        // Choose the random-access implementation based on the write-delay setting
        if (ImdstDefine.dataFileWriteDelayFlg) {
            // Delayed (buffered) writes
            this.raf = new CustomRandomAccess(new File(lineFile), "rw");
        } else {
            // Immediate writes
            //this.raf = new RandomAccessFile(new File(lineFile) , "rw");
            //this.raf = new SortedSchedulingRandomAccess(new File(lineFile) , "rw");
            this.raf = new HighSpeedDiskCacheRandomAccess(new File(lineFile), "rw", this.diskCacheFile);
        }
        // Register this map with the random-access file so data points can be resolved
        this.raf.setDataPointMap(this);

        // Queue holding the file positions of deleted records so the space can be reused
        this.deletedDataPointList = new ArrayBlockingQueue(ImdstDefine.numberOfDeletedDataPoint);

        BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream(new File(lineFile)),
                ImdstDefine.keyWorkFileEncoding));
        this.lineFile = lineFile;
        int counter = 0;

        // Scan the existing data file line by line, padding any record that is
        // shorter than the fixed record length so every line has the same size
        String readDataLine = null;
        while ((readDataLine = br.readLine()) != null) {

            counter++;
            boolean zeroDataFlg = false;
            int writeLen = this.oneDataLength;
            if (readDataLine.trim().length() == 0)
                zeroDataFlg = true;

            if (readDataLine.getBytes().length < this.oneDataLength) {
                int shiftByteSize = 0;
                if (readDataLine.length() < "(B)!0".length()) {
                    int shift = "(B)!0".length() - readDataLine.length();
                    shiftByteSize = shift;
                }
                readDataLine = "(B)!0";
                StringBuilder updateBuf = new StringBuilder(readDataLine);
                for (int i = 0; i < (this.oneDataLength - readDataLine.length()); i++) {
                    updateBuf.append("&");
                    shiftByteSize++;
                }

                if (!zeroDataFlg) {
                    updateBuf.append("\n");
                    writeLen = writeLen + 1;
                }

                shiftByteSize++;

                this.raf.seek(this.convertLineToSeekPoint(counter));
                this.raf.write(updateBuf.toString().getBytes(), 0, writeLen);
                for (int i = 0; i < shiftByteSize; i++) {
                    br.read();
                }
            }
        }

        this.lineCount = counter;
        br.close();

        // Record the current number of keys
        this.nowKeySize = super.size();
    } catch (Exception e) {
        e.printStackTrace();
        // Record the initialization failure in the node status
        StatusUtil.setStatusAndMessage(1, "KeyManagerValueMap - init - Error [" + e.getMessage() + "]");
    }
}

From source file:com.scaleoutsoftware.soss.hserver.RunMapper.java
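The mapper loads the input-split indexes assigned to the local host into an ArrayBlockingQueue<Integer> sized to the number of splits, from which the map workers later take their assignments.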

@SuppressWarnings("unchecked")
public RunMapper(HServerInvocationParameters invocationParameters)
        throws IOException, ClassNotFoundException, NoSuchMethodException {
    _logger.debug("Starting mapper. Parameters: " + invocationParameters);

    this.invocationParameters = invocationParameters;

    if (invocationParameters.isOldApi()) {
        mapperWrapper = new MapperWrapperMapred<INKEY, INVALUE, OUTKEY, OUTVALUE>(invocationParameters);
    } else {
        mapperWrapper = new MapperWrapperMapreduce<INKEY, INVALUE, OUTKEY, OUTVALUE>(invocationParameters);
    }

    configuration = (Configuration) invocationParameters.getConfiguration();
    invocationId = invocationParameters.getAppId();

    //This happens under _jobLock, so we won't interfere with the running tasks

    runMapContext = new RunHadoopMapContext<OUTKEY, OUTVALUE>(
            invocationParameters.getHadoopPartitionToSossRegionMapping(), invocationParameters.getAppId(),
            HServerParameters.getSetting(MAP_OUTPUTCHUNKSIZE_KB, configuration),
            HServerParameters.getSetting(MAP_HASHTABLESIZE, configuration),
            HServerParameters.getSetting(MAP_MAXTEMPMEMORY_KB, configuration),
            mapperWrapper.getMapOutputKeyClass(), mapperWrapper.getMapOutputValueClass(), false, 0,
            mapperWrapper, mapperWrapper.getPartitioner(), configuration);

    isSingleResultOptimisation = invocationParameters.isSingleResultOptimisation();

    inputSplitList = invocationParameters.getInputSplits();

    List<Integer> splitIndexList = null;
    for (InetAddress address : NetUtils.getLocalInterfaces()) {
        splitIndexList = ((InvocationParameters<?>) invocationParameters).getInputSplitAssignment()
                .get(address);
        if (splitIndexList != null) {
            //Handle the workload sharing between multiple JVMs
            //We assume that split list for the IP comes to each JVM in the same order
            if (InvocationWorker.getNumberOfWorkers() > 1) {
                int listSize = splitIndexList.size();
                int splitsPerHost = Math.max(1, listSize / InvocationWorker.getNumberOfWorkers() + 1);
                int startIndex = splitsPerHost * InvocationWorker.getIgWorkerIndex();

                if (startIndex < listSize) {
                    int stopIndex = Math.min(listSize, startIndex + splitsPerHost);
                    splitIndexList = splitIndexList.subList(startIndex, stopIndex);
                } else {
                    splitIndexList = Collections.EMPTY_LIST;
                }
                _logger.warn("Split list to process:" + splitIndexList + ", ;" + listSize + "," + splitsPerHost
                        + "," + startIndex);
            }
            break;
        }
    }

    if (splitIndexList != null && splitIndexList.size() > 0) { //We found our split list
        numberOfSplits = splitIndexList.size();
        splitIndexesForThisHost = new ArrayBlockingQueue<Integer>(numberOfSplits);
        splitIndexesForThisHost.addAll(splitIndexList);
        numberOfWorkers = Math.max(1,
                Math.min(invocationParameters.getNumberOfSlotsPerNode(), splitIndexesForThisHost.size()));

    } else { //Short circuit the mapper
        numberOfSplits = 0;
        splitIndexesForThisHost = null;
        numberOfWorkers = 0;
    }

    //If there is a cap on maximum number of slots, apply it
    int maxSlots = HServerParameters.getSetting(HServerParameters.MAX_SLOTS, configuration);
    if (maxSlots > 0) {
        numberOfWorkers = Math.min(numberOfWorkers, maxSlots);
    }

}

From source file:org.geppetto.simulation.manager.ExperimentRunManager.java
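A per-user ArrayBlockingQueue<IExperiment>(100) is created lazily the first time that user queues an experiment.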

/**
 * @param user the user whose queue the experiment is added to
 * @param experiment the experiment to queue
 * @param status the status the experiment must currently have in order to be queued
 */
private synchronized void addExperimentToQueue(IUser user, IExperiment experiment, ExperimentStatus status) {
    BlockingQueue<IExperiment> userExperiments = queue.get(user);
    if (userExperiments == null) {
        userExperiments = new ArrayBlockingQueue<IExperiment>(100);
        queue.put(user, userExperiments);
    }
    if (experiment.getStatus() == status) {
        experiment.setStatus(ExperimentStatus.QUEUED);
        userExperiments.add(experiment);
    }
}

From source file:net.dempsy.container.TestContainer.java
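An ArrayBlockingQueue<>(16) acts as the message transport behind a BlockingQueueReceiver in this output-catching test node.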

public NodeManager addOutputCatchStage() throws InterruptedException {
    // =======================================================
    // configure an output catcher tier
    final Node out = new Node("test-app").defaultRoutingStrategyId("net.dempsy.router.simple")
            .receiver(new BlockingQueueReceiver(new ArrayBlockingQueue<>(16)))
            .setNodeStatsCollector(new BasicNodeStatsCollector()); // same app as the spring file.
    out.cluster("output-catch").mp(new MessageProcessor<OutputCatcher>(new OutputCatcher()));
    out.validate();

    final NodeManager nman = track(new NodeManager()).node(out)
            .collaborator(track(sessionFactory.createSession())).start();
    // wait until we can actually reach the output-catch cluster from the main node
    assertTrue(poll(o -> {
        try {
            return canReach(getRouter(manager), "output-catch",
                    new KeyExtractor().extract(new OutputMessage("foo", 1, 1)).iterator().next());
        } catch (final Exception e) {
            return false;
        }
    }));
    // =======================================================
    return nman;
}

From source file:org.apache.hadoop.raid.ParallelStreamReader.java
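The constructor allocates an ArrayBlockingQueue<ReadResult>(boundedBufferCapacity) as the bounded buffer between the parallel reader threads and the consumer, and throttles the readers with a Semaphore holding numThreads permits.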

/**
 * Reads data from multiple streams in parallel and puts the data in a queue.
 * @param streams The input streams to read from.
 * @param bufSize The amount of data to read from each stream in each go.
 * @param numThreads Number of threads to use for parallelism.
 * @param boundedBufferCapacity The capacity of the queue that holds the read results.
 */

public ParallelStreamReader(Progressable reporter, InputStream[] streams, int bufSize, int numThreads,
        int boundedBufferCapacity, long maxBytesPerStream, boolean computeChecksum, OutputStream[] outs)
        throws IOException {
    this.reporter = reporter;
    this.computeChecksum = computeChecksum;
    this.streams = new InputStream[streams.length];
    this.endOffsets = new long[streams.length];
    if (computeChecksum) {
        this.checksums = new CRC32[streams.length];
    }
    this.outs = outs;
    for (int i = 0; i < streams.length; i++) {
        this.streams[i] = streams[i];
        if (this.streams[i] instanceof DFSDataInputStream) {
            DFSDataInputStream stream = (DFSDataInputStream) this.streams[i];
            // in directory raiding, the block size for each input stream 
            // might be different, so we need to determine the endOffset of
            // each stream by their own block size.
            List<LocatedBlock> blocks = stream.getAllBlocks();
            if (blocks.size() == 0) {
                this.endOffsets[i] = Long.MAX_VALUE;
                if (computeChecksum) {
                    this.checksums[i] = null;
                }
            } else {
                long blockSize = blocks.get(0).getBlockSize();
                this.endOffsets[i] = stream.getPos() + blockSize;
                if (computeChecksum) {
                    this.checksums[i] = new CRC32();
                }
            }
        } else {
            this.endOffsets[i] = Long.MAX_VALUE;
            if (computeChecksum) {
                this.checksums[i] = null;
            }
        }
        streams[i] = null; // Take over ownership of streams.
    }
    this.bufSize = bufSize;
    this.boundedBuffer = new ArrayBlockingQueue<ReadResult>(boundedBufferCapacity);
    if (numThreads > streams.length) {
        this.numThreads = streams.length;
    } else {
        this.numThreads = numThreads;
    }
    this.remainingBytesPerStream = maxBytesPerStream;
    this.slots = new Semaphore(this.numThreads);
    ThreadFactory ParallelStreamReaderFactory = new ThreadFactoryBuilder()
            .setNameFormat("ParallelStreamReader-read-pool-%d").build();
    this.readPool = Executors.newFixedThreadPool(this.numThreads, ParallelStreamReaderFactory);
    this.mainThread = new MainThread();
    mainThread.setName("ParallelStreamReader-main");
}