Example usage for java.util.concurrent ArrayBlockingQueue ArrayBlockingQueue(int capacity)

Introduction

This page collects example usages of the java.util.concurrent ArrayBlockingQueue(int capacity) constructor from open-source projects.

Prototype

public ArrayBlockingQueue(int capacity) 

Document

Creates an ArrayBlockingQueue with the given (fixed) capacity and default access policy.
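
To illustrate the fixed-capacity semantics before the real-world examples, here is a minimal, self-contained sketch (not taken from any of the projects below): put blocks once the queue holds capacity elements, and take blocks while it is empty.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

public class BoundedQueueDemo {
    public static void main(String[] args) throws InterruptedException {
        // Fixed capacity of 2 with the default (non-fair) access policy
        BlockingQueue<String> queue = new ArrayBlockingQueue<>(2);

        queue.put("a"); // succeeds immediately
        queue.put("b"); // fills the queue to capacity
        // queue.put("c") would now block until a consumer removes an element

        System.out.println(queue.take()); // prints "a" (FIFO order)
        System.out.println(queue.take()); // prints "b"
    }
}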

Usage

From source file:com.streamsets.datacollector.lineage.LineagePublisherTaskImpl.java

@Override
protected void initTask() {
    String lineagePluginsConfig = configuration.get(LineagePublisherConstants.CONFIG_LINEAGE_PUBLISHERS, null);
    if (StringUtils.isEmpty(lineagePluginsConfig)) {
        LOG.info("No publishers configured");
        return;
    }

    String[] lineagePlugins = lineagePluginsConfig.split(",");
    // This implementation is intentionally limited to only one plugin at the moment
    if (lineagePlugins.length != 1) {
        throw new IllegalStateException("Only one lineage publisher is supported at the moment");
    }
    String publisherName = lineagePlugins[0];
    LineagePublisherDefinition def = getDefinition(publisherName);
    LOG.info("Using lineage publisher named {} (backed by {}::{})", publisherName,
            def.getLibraryDefinition().getName(), def.getName());

    // Instantiate and initialize the publisher
    createAndInitializeRuntime(def, publisherName);

    // Initialize the blocking queue that buffers data before it is sent to the lineage publisher
    int size = configuration.get(LineagePublisherConstants.CONFIG_LINEAGE_QUEUE_SIZE,
            LineagePublisherConstants.DEFAULT_LINEAGE_QUEUE_SIZE);
    eventQueue = new ArrayBlockingQueue<>(size);

    // And run the separate thread
    executorService = Executors.newSingleThreadExecutor();
    consumerRunnable = new EventQueueConsumer();
}
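
The EventQueueConsumer created above is not shown in this snippet. A hypothetical sketch of such a consumer (class and method names are assumptions here, not the StreamSets implementation) would drain the bounded queue in a loop on the single executor thread:

import java.util.concurrent.BlockingQueue;

// Hypothetical stand-in for EventQueueConsumer; the real class is not shown above.
class QueueDrainingConsumer implements Runnable {
    private final BlockingQueue<Object> eventQueue;

    QueueDrainingConsumer(BlockingQueue<Object> eventQueue) {
        this.eventQueue = eventQueue;
    }

    @Override
    public void run() {
        try {
            while (!Thread.currentThread().isInterrupted()) {
                Object event = eventQueue.take(); // blocks until an event is buffered
                publish(event);                   // hand the event to the lineage publisher
            }
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // exit cleanly when the executor shuts down
        }
    }

    private void publish(Object event) {
        // placeholder for the actual publisher call
    }
}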

From source file:org.apache.hadoop.raid.IAEncoder.java

protected void encodeFileToStream(FileSystem fs, Path srcFile, long srcSize, long blockSize, OutputStream out,
        Progressable reporter) throws IOException {
    // (disabled) One parity block could be written directly to out, the rest to local files.
    //tmpOuts[0] = out;

    //File[] tmpFiles = new File[paritySize];

    /*
     * signal queue to trigger output
     * a blocking queue is not strictly needed here (adjust in the future)
     */
    BlockingQueue<byte[]> closedBuf = new ArrayBlockingQueue<byte[]>(14);

    /*
     * Output thread
     */
    DataSender ds = new DataSender(closedBuf, out, blockSize, srcSize);
    Thread dst = new Thread(ds);
    dst.start();

    // Loop over stripes in the file.
    for (long stripeStart = 0; stripeStart < srcSize; stripeStart += blockSize * stripeSize) {
        reporter.progress();

        //LOG.info("Starting encoding of stripe " + srcFile + ":" + stripeStart);

        byte[][] bufs = new byte[paritySize][];
        /*
         * create temp file to write parity block (one file for each block)
         */
        //LOG.info("allocating memory mcount: "+mcount);
        for (int i = 0; i < paritySize; i++) {
            //tmpFiles[i] = File.createTempFile("parity", "_" + i); 
            //LOG.info("Created tmp file " + tmpFiles[i]);
            //tmpFiles[i].deleteOnExit();
            //LOG.info("allocating memory index: "+i);
            bufs[i] = new byte[(int) blockSize];
            mcount++;
        }
        //LOG.info("allocated memory");

        // Create input streams for blocks in the stripe.
        InputStream[] blocks = stripeInputs(fs, srcFile, stripeStart, srcSize, blockSize);

        //LOG.info("created InputStream");
        /*
         * encode data
         */
        encodeStripe(blocks, stripeStart, blockSize, bufs, reporter);

        /*
         * trigger output
         */

        //LOG.info("encoded stripe");
        for (int i = 0; i < paritySize; i++) {
            try {
                closedBuf.put(bufs[i]);
            } catch (InterruptedException e) {
                // preserve the interrupt status rather than silently swallowing it
                Thread.currentThread().interrupt();
            }
            reporter.progress();
        }
        //LOG.info("push closed buf");
    }

    try {
        // wait for the output thread to finish writing
        dst.join();
        LOG.info("dst joined");
    } catch (InterruptedException e) {
        LOG.info("thread join interrupted");
    }
}

From source file:com.turn.ttorrent.common.TorrentCreator.java

/**
 * Creates a new executor suitable for torrent hashing.
 *
 * This executor controls memory usage by using a bounded queue, and the
 * CallerRunsPolicy slows down the producer if the queue bound is exceeded.
 * The requirement is then to make the queue large enough to keep all the
 * executor threads busy if the producer executes a task itself.
 * 
 * In terms of memory, Executor.execute is much more efficient than
 * ExecutorService.submit, and ByteBuffer(s) released by the ChunkHasher(s)
 * remain in eden space, so are rapidly recycled for reading the next
 * block(s). JVM ergonomics can make this much more efficient than any
 * heap-based strategy we might devise. Now, we want the queue size small
 * enough that JVM ergonomics keeps the eden size small.
 */
@Nonnull
public static ThreadPoolExecutor newExecutor(@Nonnull String peerName) {
    int threads = getHashingThreadsCount();
    logger.info("Creating ExecutorService with {} threads", new Object[] { threads });
    ThreadFactory factory = new DefaultThreadFactory("bittorrent-executor-" + peerName, true);
    ThreadPoolExecutor service = new ThreadPoolExecutor(0, threads, 1L, TimeUnit.SECONDS,
            new ArrayBlockingQueue<Runnable>(threads * 3), factory, new ThreadPoolExecutor.CallerRunsPolicy());
    service.allowCoreThreadTimeOut(true);
    return service;
}
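
To make the throttling described in the javadoc concrete, here is a small usage sketch (illustrative only; the task body is an assumption, not from the ttorrent source): once all threads are busy and the bounded queue of threads * 3 slots is full, CallerRunsPolicy executes the next task on the submitting thread, pacing the producer.

import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class ExecutorThrottleDemo {
    public static void main(String[] args) throws InterruptedException {
        // Uses the newExecutor factory shown above.
        ThreadPoolExecutor executor = TorrentCreator.newExecutor("demo-peer");
        for (int i = 0; i < 100; i++) {
            // When the bounded queue is full, this execute() call runs the
            // task inline, slowing the loop instead of growing the heap.
            executor.execute(() -> {
                long x = 0; // simulate a CPU-bound hashing task
                for (int j = 0; j < 1_000_000; j++) x += j;
            });
        }
        executor.shutdown();
        executor.awaitTermination(1, TimeUnit.MINUTES);
    }
}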

From source file:nextflow.fs.dx.DxUploadOutputStream.java

/**
 * Initialize the uploader output stream for the specified file
 *
 * @param fileId The target DnaNexus file
 * @param remote The DnaNexus API wrapper object
 * @param maxForks Maximum number of parallel upload jobs allowed (default: 5)
 */
public DxUploadOutputStream(String fileId, DxApi remote, int maxForks) {

    this.fileId = fileId;
    this.queue = new ArrayBlockingQueue<>(maxForks);
    this.phaser = new Phaser();
    this.remote = remote;
    this.buf = allocate();
    this.executor = Executors.newCachedThreadPool();
    checkCapacity();
    start();
}
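
Sizing the queue to maxForks is what enforces the upload-parallelism bound: assuming filled buffers are handed to the queue with a blocking put (the write path is not shown in this snippet), the writer stalls once maxForks uploads are pending, capping memory use at roughly maxForks buffers.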

From source file:org.openiot.gsn.vsensor.RVirtualSensor.java

public void dataAvailable(String inputStreamName, StreamElement streamElement) {
    ArrayBlockingQueue<StreamElement> circularBuffer = circularBuffers.get(inputStreamName);

    // Get the circular buffer that matches the input stream. Create a new one
    // if none exists
    if (circularBuffer == null) {
        circularBuffer = new ArrayBlockingQueue<StreamElement>(windowSize);
        circularBuffers.put(inputStreamName, circularBuffer);
    }
    try {
        circularBuffer.put(streamElement);

        logger.debug(
                "Window for " + inputStreamName + " contains: " + circularBuffer.size() + " of " + windowSize);

        if (circularBuffer.size() == windowSize) {
            logger.info("Window for " + inputStreamName + " contains: " + circularBuffer.size() + " of "
                    + windowSize);

            // Connect to Rserve and assign global variables
            try {
                rc = new RConnection(params.get(SERVER), Integer.parseInt(params.get(PORT)));

                logger.info("Connected to R server " + params.get(SERVER) + ":" + params.get(PORT));

                String[] fieldname = streamElement.getFieldNames();

                logger.info("Sending " + fieldname.length + " data attributes to R server.");

                // Assign R vector variables prior to running the script
                for (int n = 0; n < fieldname.length; n++) {
                    // Build the window
                    double[] values = new double[windowSize];
                    StreamElement elt = null;

                    // convert the circular buffer to an array
                    Object[] elts = circularBuffer.toArray();
                    for (int i = 0; i < elts.length; i++) {
                        elt = (StreamElement) elts[i];
                        values[i] = ((Number) elt.getData()[n]).doubleValue();
                    }

                    // assign vectors as R variables
                    rc.assign("gsn_" + fieldname[n].toLowerCase(), values);
                }

                logger.info("Done.");

                // read the R script
                // open the script file every time we do some processing (this can be
                // improved).
                File file = new File(params.get(SCRIPT).toString());
                script = FileUtils.readFileToString(file, "UTF-8");

                logger.info("Sending R script.");

                // evaluate the R script
                rc.voidEval(script);
                logger.info("Done.");

                // get the output timestamp
                logger.info("Performing computation in R server (please wait).");

                // collect vector values after computation
                DataField[] outStructure = null;

                outStructure = getVirtualSensorConfiguration().getOutputStructure();

                String[] plotFieldName = new String[outStructure.length];
                Byte[] plotFieldType = new Byte[outStructure.length];

                for (int w = 0; w < outStructure.length; w++) {
                    plotFieldName[w] = outStructure[w].getName();
                    plotFieldType[w] = outStructure[w].getDataTypeID();
                }

                Serializable[] outputData = null;
                StreamElement se = null;

                Byte[] fieldType = streamElement.getFieldTypes();

                // check if we have defined more attributes in the output structure
                if (outStructure.length > fieldname.length) {
                    outputData = new Serializable[outStructure.length];
                } else {
                    outputData = new Serializable[fieldname.length];
                }

                for (int n = 0; n < fieldname.length; n++) {
                    // evaluate/get attribute data from R server
                    xp = rc.parseAndEval(fieldname[n].toLowerCase());

                    if (fieldType[n] == DataTypes.DOUBLE) {
                        double[] b1 = xp.asDoubles();
                        outputData[n] = b1[b1.length - 1];
                    }

                    if (fieldType[n] == DataTypes.INTEGER) {
                        int[] b1 = xp.asIntegers();
                        outputData[n] = b1[b1.length - 1];
                    }

                    if (fieldType[n] == DataTypes.BIGINT) {
                        int[] b1 = xp.asIntegers();
                        outputData[n] = (long) b1[b1.length - 1];
                    }
                }

                int len1 = outStructure.length;
                int len2 = fieldname.length;

                // check if we have defined more attributes in the output structure
                if (len1 > len2) {
                    if (stype.equals("plot")) {
                        xp = rc.parseAndEval("gsn_plot");
                        outputData[len2] = xp.asBytes();

                        se = new StreamElement(plotFieldName, plotFieldType, outputData);
                    }
                } else {
                    se = new StreamElement(fieldname, fieldType, outputData);
                }

                logger.info("Computation finished.");

                dataProduced(se);
                logger.debug("Stream published: " + se.toString().toLowerCase());

                // Close connection to R server
                rc.close();
                logger.info("Connection to R server closed.");

            } catch (Exception e) {
                logger.warn(e.getMessage(), e);
                // Close connection to R server
                rc.close();
                logger.info("Connection to R server closed.");
            }

            // Remove step size elements from the beginning of the buffer
            for (int i = 0; i < stepSize; i++) {
                try {
                    circularBuffer.take();
                } catch (InterruptedException e) {
                    logger.warn(e.getMessage(), e);
                }
            }

        }

        // end of the window-size if block
    } catch (InterruptedException e) {
        logger.warn(e.getMessage(), e);
    }

}
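
The pattern above uses an ArrayBlockingQueue as a bounded sliding window: elements are put until the window is full, the whole window is processed, and stepSize elements are then removed from the head. Stripped of the R integration, the core mechanic looks like this self-contained sketch:

import java.util.concurrent.ArrayBlockingQueue;

public class SlidingWindowDemo {
    public static void main(String[] args) throws InterruptedException {
        final int windowSize = 4;
        final int stepSize = 2;
        ArrayBlockingQueue<Integer> window = new ArrayBlockingQueue<>(windowSize);

        for (int sample = 0; sample < 10; sample++) {
            window.put(sample);
            if (window.size() == windowSize) {
                // process the full window (here: just print it)
                System.out.println("window = " + window);
                // slide the window forward by stepSize elements
                for (int i = 0; i < stepSize; i++) {
                    window.take();
                }
            }
        }
    }
}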

From source file:backup.datanode.DataNodeRestoreProcessor.java

public DataNodeRestoreProcessor(Configuration conf, DataNode datanode) throws Exception {
    _closer = Closer.create();
    _datanode = datanode;
    _restoreThroughput = Metrics.METRICS.meter(RESTORE_THROUGHPUT);
    _bytesPerChecksum = conf.getInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY,
            DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT);
    _checksumType = Type
            .valueOf(conf.get(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT));
    int threads = conf.getInt(DFS_BACKUP_DATANODE_RESTORE_BLOCK_HANDLER_COUNT_KEY,
            DFS_BACKUP_DATANODE_RESTORE_BLOCK_HANDLER_COUNT_DEFAULT);
    long pauseOnError = conf.getLong(DFS_BACKUP_DATANODE_RESTORE_ERROR_PAUSE_KEY,
            DFS_BACKUP_DATANODE_RESTORE_ERROR_PAUSE_DEFAULT);
    _backupStore = _closer.register(BackupStore.create(BackupUtil.convert(conf)));
    _restoreBlocks = new ArrayBlockingQueue<>(threads);
    _executorService = Executors.newCachedThreadPool();
    _closer.register((Closeable) () -> _executorService.shutdownNow());
    for (int t = 0; t < threads; t++) {
        _executorService.submit(Executable.createDaemon(LOG, pauseOnError, _running, () -> restoreBlocks()));
    }
}

From source file:gsn.vsensor.RVirtualSensor.java

public void dataAvailable(String inputStreamName, StreamElement streamElement) {
    ArrayBlockingQueue<StreamElement> circularBuffer = circularBuffers.get(inputStreamName);

    // Get the circular buffer that matches the input stream. Create a new one
    // if none exists
    if (circularBuffer == null) {
        circularBuffer = new ArrayBlockingQueue<StreamElement>(windowSize);
        circularBuffers.put(inputStreamName, circularBuffer);
    }
    try {
        circularBuffer.put(streamElement);

        logger.debug(
                "Window for " + inputStreamName + " contains: " + circularBuffer.size() + " of " + windowSize);

        if (circularBuffer.size() == windowSize) {
            logger.info("Window for " + inputStreamName + " contains: " + circularBuffer.size() + " of "
                    + windowSize);

            // Connect to Rserve and assign global variables
            try {
                rc = new RConnection(params.get(SERVER), Integer.parseInt(params.get(PORT)));

                logger.info("Connected to R server " + params.get(SERVER) + ":" + params.get(PORT));

                String[] fieldname = streamElement.getFieldNames();

                logger.info("Sending " + fieldname.length + " data attributes to R server.");

                // Assign R vector variables prior to running the script
                for (int n = 0; n < fieldname.length; n++) {
                    // Build the window
                    double[] values = new double[windowSize];
                    StreamElement elt = null;

                    // convert the circular buffer to an array
                    Object[] elts = circularBuffer.toArray();
                    for (int i = 0; i < elts.length; i++) {
                        elt = (StreamElement) elts[i];
                        values[i] = ((Number) elt.getData()[n]).doubleValue();
                    }

                    // assign vectors as R variables
                    rc.assign("gsn_" + fieldname[n].toLowerCase(), values);
                }

                logger.info("Done.");

                // read the R script
                // open the script file every time we do some processing (this can be
                // improved).
                File file = new File(params.get(SCRIPT).toString());
                script = FileUtils.readFileToString(file, "UTF-8");

                logger.info("Sending R script.");

                // evaluate the R script
                rc.voidEval(script);
                logger.info("Done.");

                // get the output timestamp
                logger.info("Performing computation in R server (please wait).");

                // collect vector values after computation
                DataField[] outStructure = null;

                outStructure = getVirtualSensorConfiguration().getOutputStructure();

                String[] plotFieldName = new String[outStructure.length];
                Byte[] plotFieldType = new Byte[outStructure.length];

                for (int w = 0; w < outStructure.length; w++) {
                    plotFieldName[w] = outStructure[w].getName();
                    plotFieldType[w] = outStructure[w].getDataTypeID();
                }

                Serializable[] outputData = null;
                StreamElement se = null;

                Byte[] fieldType = streamElement.getFieldTypes();

                // check if we have defined more attributes in the output structure
                if (outStructure.length > fieldname.length) {
                    outputData = new Serializable[outStructure.length];
                } else {
                    outputData = new Serializable[fieldname.length];
                }

                for (int n = 0; n < fieldname.length; n++) {
                    // evaluate/get attribute data from R server
                    xp = rc.parseAndEval(fieldname[n].toLowerCase());

                    if (fieldType[n] == DataTypes.DOUBLE) {
                        double[] b1 = xp.asDoubles();
                        outputData[n] = b1[b1.length - 1];
                    }

                    if (fieldType[n] == DataTypes.INTEGER) {
                        int[] b1 = xp.asIntegers();
                        outputData[n] = b1[b1.length - 1];
                    }

                    if (fieldType[n] == DataTypes.BIGINT) {
                        int[] b1 = xp.asIntegers();
                        outputData[n] = (long) b1[b1.length - 1];
                    }
                }

                int len1 = outStructure.length;
                int len2 = fieldname.length;

                // check if we have defined more attributes in the output structure
                if (len1 > len2) {
                    if (stype.equals("plot")) {
                        xp = rc.parseAndEval("gsn_plot");
                        outputData[len2] = xp.asBytes();

                        se = new StreamElement(plotFieldName, plotFieldType, outputData);
                    }
                } else {
                    se = new StreamElement(fieldname, fieldType, outputData);
                }

                logger.info("Computation finished.");

                dataProduced(se);
                logger.debug("Stream published: " + se.toString().toLowerCase());

                // Close connection to R server
                rc.close();
                logger.info("Connection to R server closed.");

            } catch (Exception e) {
                logger.warn(e.getMessage(), e);
                // Close connection to R server
                rc.close();
                logger.info("Connection to R server closed.");
            }

            // Remove step size elements from the beginning of the buffer
            for (int i = 0; i < stepSize; i++) {
                try {
                    circularBuffer.take();
                } catch (InterruptedException e) {
                    logger.warn(e.getMessage(), e);
                }
            }

        }

        // end of the window-size if block
    } catch (InterruptedException e) {
        logger.warn(e.getMessage(), e);
    }

}

From source file:com.amazonaws.services.cloudtrail.processinglibrary.factory.ThreadPoolFactory.java

/**
 * Helper function to create an instance of ExecutorService with bounded queue size.
 *
 * When no more threads or queue slots are available because their bounds would be exceeded, the thread
 * pool runs the rejected task directly in the calling thread; if the executor has been shut down, the
 * task is discarded instead. Note that while the calling thread is running a rejected task, it will not
 * poll more messages to process.
 *
 * @param threadCount number of threads
 * @return an instance of ExecutorService
 */
private ExecutorService createThreadPoolWithBoundedQueue(int threadCount) {
    BlockingQueue<Runnable> blockingQueue = new ArrayBlockingQueue<Runnable>(threadCount);
    RejectedExecutionHandler rejectedExecutionHandler = new ThreadPoolExecutor.CallerRunsPolicy();
    ExecutorService executorService = new ProcessingLibraryThreadPoolExecutor(threadCount, threadCount, 0,
            TimeUnit.MILLISECONDS, blockingQueue, rejectedExecutionHandler, this.exceptionHandler);
    return executorService;
}
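
Bounding the queue at threadCount and pairing it with CallerRunsPolicy gives the behavior described in the javadoc: at most threadCount tasks wait while threadCount run, and overflow work executes on the polling thread itself. ProcessingLibraryThreadPoolExecutor is specific to this library; a roughly equivalent sketch using only JDK classes (minus the custom exception handling) would be:

private ExecutorService createJdkOnlyBoundedPool(int threadCount) {
    return new ThreadPoolExecutor(threadCount, threadCount, 0L, TimeUnit.MILLISECONDS,
            new ArrayBlockingQueue<Runnable>(threadCount),
            new ThreadPoolExecutor.CallerRunsPolicy());
}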

From source file:org.itstechupnorth.walrus.dir.DirectoryProcessor.java

protected BlockingQueue<Message<File>> queue() {
    return new ArrayBlockingQueue<Message<File>>(buffers);
}

From source file:org.apache.hadoop.raid.DirectoryTraversal.java

public DirectoryTraversal(String friendlyName, Collection<Path> roots, FileSystem fs, Filter filter,
        int numThreads, boolean doShuffle, boolean allowUseStandby, boolean checkLeafDir) throws IOException {
    this.output = new ArrayBlockingQueue<FileStatus>(OUTPUT_QUEUE_SIZE);
    this.directories = new LinkedBlockingDeque<Path>();
    this.fs = fs;
    if (ENABLE_AVATAR_STANDBY && allowUseStandby && fs instanceof DistributedAvatarFileSystem) {
        avatarFs = (DistributedAvatarFileSystem) fs;
    } else {
        avatarFs = null;
    }
    this.filter = filter;
    this.totalDirectories = new AtomicInteger(roots.size());
    this.processors = new Processor[numThreads];
    this.activeThreads = new AtomicInteger(numThreads);
    this.doShuffle = doShuffle;
    this.allowStandby = allowUseStandby;
    if (doShuffle) {
        List<Path> toShuffleAndAdd = new ArrayList<Path>();
        toShuffleAndAdd.addAll(roots);
        Collections.shuffle(toShuffleAndAdd);
        this.directories.addAll(toShuffleAndAdd);
    } else {
        this.directories.addAll(roots);
    }
    LOG.info("Starting with directories:" + roots.toString() + " numThreads:" + numThreads);
    if (roots.isEmpty()) {
        try {
            output.put(FINISH_TOKEN);
        } catch (InterruptedException e) {
            throw new IOException(e);
        }
        return;
    }
    for (int i = 0; i < processors.length; ++i) {
        if (checkLeafDir) {
            processors[i] = new LeafDirectoryProcessor();
        } else {
            processors[i] = new Processor();
        }
        processors[i].setName(friendlyName + i);
    }
    for (int i = 0; i < processors.length; ++i) {
        processors[i].start();
    }
}
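
The FINISH_TOKEN placed on the output queue when there are no roots is a poison pill: a sentinel element that tells consumers no more results are coming. A minimal, self-contained sketch of the pattern (the sentinel and names here are illustrative, not from the Hadoop source):

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

public class PoisonPillDemo {
    // Sentinel instance signalling "no more results"; compared by identity.
    private static final String FINISH_TOKEN = new String("FINISH");

    public static void main(String[] args) throws InterruptedException {
        BlockingQueue<String> output = new ArrayBlockingQueue<>(10);

        Thread producer = new Thread(() -> {
            try {
                output.put("result-1");
                output.put("result-2");
                output.put(FINISH_TOKEN); // tell the consumer to stop
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        producer.start();

        // Drain until the sentinel arrives (identity comparison is intentional).
        for (String item = output.take(); item != FINISH_TOKEN; item = output.take()) {
            System.out.println("processed " + item);
        }
        producer.join();
    }
}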