Example usage for the java.util.concurrent.LinkedBlockingQueue constructor

Introduction

On this page you can find example usage for the java.util.concurrent.LinkedBlockingQueue constructor.

Prototype

public LinkedBlockingQueue(Collection<? extends E> c) 

Document

Creates a LinkedBlockingQueue with a capacity of Integer.MAX_VALUE, initially containing the elements of the given collection, added in traversal order of the collection's iterator. Note that several of the examples below use the related bounded overload, LinkedBlockingQueue(int capacity), which limits the queue to the given size.
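
Before the real-world examples, here is a minimal self-contained sketch of this constructor; the class name, element values, and printing loop are illustrative and not taken from any of the projects below:

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class LinkedBlockingQueueFromCollection {
    public static void main(String[] args) throws InterruptedException {
        // Seed collection; elements are copied in the iterator's traversal order
        List<String> tasks = Arrays.asList("parse", "transform", "index");

        // Collection constructor: the queue capacity defaults to Integer.MAX_VALUE
        BlockingQueue<String> queue = new LinkedBlockingQueue<String>(tasks);

        // take() returns the elements in the order they were copied from the collection
        while (!queue.isEmpty()) {
            System.out.println(queue.take());
        }
    }
}

Passing an int to the constructor instead, as many of the examples below do, bounds the queue so that put() blocks and offer() fails once the capacity is reached.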

Usage

From source file: esg.node.core.ESGQueue.java

public ESGQueue(DataNodeComponent handler, ESGQueueController qController, ESGBatchController bController) {
    this(handler,
            new ThreadPoolExecutor(2, 20, 10L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>(1000),
                    new ESGGroupedThreadFactory(handler.getName()), new ESGRejectPolicy(handler.getName())),
            qController, bController);
}

From source file: org.apache.solr.client.solrj.impl.ConcurrentUpdateSolrServer.java

/**
 * Uses the supplied HttpClient to send documents to the Solr server.
 */
public ConcurrentUpdateSolrServer(String solrServerUrl, HttpClient client, int queueSize, int threadCount,
        ExecutorService es, boolean streamDeletes) {
    this.server = new HttpSolrServer(solrServerUrl, client);
    this.server.setFollowRedirects(false);
    queue = new LinkedBlockingQueue<>(queueSize);
    this.threadCount = threadCount;
    runners = new LinkedList<>();
    scheduler = es;
    this.streamDeletes = streamDeletes;
}

From source file: com.datasalt.pangool.solr.BatchWriter.java

public BatchWriter(EmbeddedSolrServer solr, int batchSize, TaskID tid, int writerThreads, int queueSize) {
    this.solr = solr;
    this.writerThreads = writerThreads;
    this.queueSize = queueSize;
    taskId = tid;

    // we need to obtain the settings before the constructor
    batchPool = new ThreadPoolExecutor(writerThreads, writerThreads, 5, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(queueSize), new ThreadPoolExecutor.CallerRunsPolicy());
    this.batchToWrite = new ArrayList<SolrInputDocument>(batchSize);
}

From source file: com.jkoolcloud.tnt4j.streams.custom.dirStream.DirStreamingManager.java

private void initialize() {
    executorService = new ThreadPoolExecutor(CORE_TREAD_POOL_SIZE, MAX_TREAD_POOL_SIZE, KEEP_ALIVE_TIME,
            TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(MAX_TREAD_POOL_SIZE * 2),
            new TNTInputStream.StreamsThreadFactory("DirStreamingManagerExecutorThread-")); // NON-NLS

    executorService.setRejectedExecutionHandler(new RejectedExecutionHandler() {
        @Override
        public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
            try {
                boolean added = executor.getQueue().offer(r, offerTimeout, TimeUnit.SECONDS);
                if (!added) {
                    LOGGER.log(OpLevel.WARNING,
                            StreamsResources.getString(StreamsResources.RESOURCE_BUNDLE_NAME,
                                    "TNTInputStream.tasks.buffer.limit"),
                            offerTimeout);
                    notifyStreamingJobRejected(r);
                }
            } catch (InterruptedException exc) {
                LOGGER.log(OpLevel.WARNING,
                        StreamsResources.getString(StreamsResources.RESOURCE_BUNDLE_NAME,
                                "DirStreamingManager.job.offer.interrupted"),
                        ((StreamingJob) r).getJobId(), exc);
            }
        }
    });

    dirWatchdog = new DirWatchdog(dirPath, DirWatchdog.getDefaultFilter(fileWildcardName));
    dirWatchdog.addObserverListener(new FileAlterationListenerAdaptor() {
        @Override
        public void onFileCreate(File file) {
            handleJobConfigCreate(file);
        }

        @Override
        public void onFileChange(File file) {
            handleJobConfigChange(file);
        }

        @Override
        public void onFileDelete(File file) {
            handleJobConfigRemoval(file);
        }
    });

    LOGGER.log(OpLevel.DEBUG, StreamsResources.getString(StreamsResources.RESOURCE_BUNDLE_NAME,
            "DirStreamingManager.dir.monitoring.started"), dirPath, fileWildcardName);
}

From source file: org.yamj.core.service.ScanningScheduler.java

@Scheduled(initialDelay = 5000, fixedDelay = 45000)
public void scanMediaFiles() throws Exception {
    int maxThreads = configService.getIntProperty("yamj3.scheduler.mediafilescan.maxThreads", 1);
    if (maxThreads <= 0 || !mediaInfoService.isMediaInfoActivated()) {
        if (!messageDisabledMediaFiles) {
            messageDisabledMediaFiles = Boolean.TRUE;
            LOG.info("Media file scanning is disabled");
        }
        return;
    } else {
        messageDisabledMediaFiles = Boolean.FALSE;
    }

    int maxResults = configService.getIntProperty("yamj3.scheduler.mediafilescan.maxResults", 20);
    List<QueueDTO> queueElements = mediaStorageService.getMediaFileQueueForScanning(maxResults);
    if (CollectionUtils.isEmpty(queueElements)) {
        LOG.debug("No media files found to scan");
        return;
    }

    LOG.info("Found {} media files to process; scan with {} threads", queueElements.size(), maxThreads);
    BlockingQueue<QueueDTO> queue = new LinkedBlockingQueue<QueueDTO>(queueElements);

    ExecutorService executor = Executors.newFixedThreadPool(maxThreads);
    for (int i = 0; i < maxThreads; i++) {
        MediaInfoRunner worker = new MediaInfoRunner(queue, mediaInfoService);
        executor.execute(worker);
    }
    executor.shutdown();

    // run until all workers have finished
    while (!executor.isTerminated()) {
        try {
            TimeUnit.SECONDS.sleep(5);
        } catch (InterruptedException ignore) {
        }
    }

    LOG.debug("Finished media file scanning");
}

From source file: sk.datalan.solr.impl.ConcurrentUpdateSolrServer.java

/**
 * Uses the supplied HttpClient to send documents to the Solr server.
 */
public ConcurrentUpdateSolrServer(String solrServerUrl, HttpClientBuilder clientBuilder, int queueSize,
        int threadCount, ExecutorService es, boolean streamDeletes) {
    this.server = new HttpSolrServer(solrServerUrl, clientBuilder);
    //    this.server.setFollowRedirects(false);
    queue = new LinkedBlockingQueue<>(queueSize);
    this.threadCount = threadCount;
    runners = new LinkedList<>();
    scheduler = es;
    this.streamDeletes = streamDeletes;
}

From source file: com.nextdoor.bender.operation.conditional.ConditionalOperation.java

public Stream<InternalEvent> getOutputStream(Stream<InternalEvent> input) {
    /*
     * outputStreams keeps track of the output Stream of each Condition.
     */
    List<Stream<InternalEvent>> outputStreams = new ArrayList<Stream<InternalEvent>>(
            this.conditionsAndProcs.size());

    /*
     * From a list of operation configurations in each condition construct queues and streams.
     */
    this.filtersAndQueues = new ArrayList<Pair<FilterOperation, Queue<InternalEvent>>>(
            this.conditionsAndProcs.size());
    for (Pair<FilterOperation, List<OperationProcessor>> filterAndProcs : this.conditionsAndProcs) {

        FilterOperation filter = filterAndProcs.getLeft();
        List<OperationProcessor> procs = filterAndProcs.getRight();

        /*
         * Construct a Queue for each conditional. This is the input to each Condition.
         */
        Queue<InternalEvent> queue = new Queue<InternalEvent>(
                new LinkedBlockingQueue<InternalEvent>(procs.size()));

        this.filtersAndQueues.add(new ImmutablePair<FilterOperation, Queue<InternalEvent>>(filter, queue));

        /*
         * Connect the condition's input Queue with operations. Each operation returns a stream with its
         * operation concatenated on.
         */
        Stream<InternalEvent> conditionInput = queue.jdkStream();
        for (OperationProcessor proc : procs) {
            conditionInput = proc.perform(conditionInput);
        }

        /*
         * Last input is the output.
         */
        outputStreams.add(conditionInput);
    }

    /*
     * Condition Consumer Threads
     * 
     * Combine each condition's output stream and write to the output Queue. When all data is consumed
     * the last condition closes the output Queue.
     */
    Queue<InternalEvent> outputQueue = new Queue<InternalEvent>(
            new LinkedBlockingQueue<InternalEvent>(this.conditionsAndProcs.size()));
    AtomicInteger lock = new AtomicInteger(outputStreams.size());

    outputStreams.forEach(stream -> {
        this.es.execute(new StreamToQueue(stream, outputQueue, lock));
    });

    /*
     * Consume input Stream in a thread and publish to each condition's Queue.
     */
    new Thread(new Runnable() {
        @Override
        public void run() {
            input.forEach(ievent -> {
                boolean matches = false;

                for (Pair<FilterOperation, Queue<InternalEvent>> filterAndQueue : filtersAndQueues) {
                    FilterOperation filter = filterAndQueue.getLeft();

                    /*
                     * If event passes the filter offer event to queue.
                     */
                    if (filter.test(ievent)) {
                        filterAndQueue.getRight().offer(ievent);
                        matches = true;
                        break;
                    }
                }

                /*
                 * Send to output queue if no case matches
                 */
                if (!matches && !filterNonMatch) {
                    outputQueue.offer(ievent);
                }
            });

            /*
             * Close queues when source queue is consumed.
             */
            for (Pair<FilterOperation, Queue<InternalEvent>> filterAndQueue : filtersAndQueues) {
                filterAndQueue.getRight().close();
            }
        }
    }).start();

    return outputQueue.jdkStream();
}

From source file: com.amazonaws.services.kinesis.connectors.KinesisClientLibraryPipelinedRecordProcessor.java

/**
 * Constructor. If null values are provided for maxQueueWaitTimeMs and/or maxProcessRecordsWaitTimeMs, default values are used.
 *
 * @param recordProcessor
 *            The record processor to wrap
 * @param maxQueueSize
 *            The maximum queue size
 * @param maxQueueWaitTimeMs
 *            Maximum time to block on the queue waiting for GetRecords result in milliseconds
 * @param maxProcessRecordsWaitTimeMs
 *            Maximum time to wait for the queue consumer to shutdown (finish ProcessRecords call) in milliseconds
 */
public KinesisClientLibraryPipelinedRecordProcessor(IRecordProcessor recordProcessor, int maxQueueSize,
        Long maxQueueWaitTimeMs, Long maxProcessRecordsWaitTimeMs) {
    this.recordProcessor = recordProcessor;
    recordQueue = new LinkedBlockingQueue<Record>(maxQueueSize);
    this.maxQueueWaitTimeMs = (maxQueueWaitTimeMs == null) ? DEFAULT_MAXIMUM_QUEUE_WAIT_TIME_MS
            : maxQueueWaitTimeMs;
    this.maxProcessRecordsWaitTimeMs = (maxProcessRecordsWaitTimeMs == null)
            ? DEFAULT_MAXIMUM_PROCESS_RECORDS_WAIT_TIME_MS
            : maxProcessRecordsWaitTimeMs;
}

From source file: com.numenta.taurus.service.TaurusDataSyncService.java

/**
 * Load all instance data from the database
 */
@Override
protected void loadAllData() throws HTMException, IOException {

    Context context = TaurusApplication.getContext();
    if (context == null) {
        // Should not happen.
        // We need application context to run.
        return;
    }

    // Get last known date from the database
    final TaurusDatabase database = TaurusApplication.getDatabase();
    if (database == null) {
        // Should not happen.
        // We need the database to run.
        return;
    }
    long from = database.getLastTimestamp();

    // Get current time
    final long now = System.currentTimeMillis();

    // The server updates the instance data table in hourly buckets as the models process
    // data. This may leave the last hour with outdated values if the server updates the
    // instance data table after we start loading the new hourly bucket.
    // To make sure the last hour's bucket is updated, we get data from the last update up to
    // now, and when the time is past a certain threshold (minute 15 of the hour) we also
    // download the previous hour once.
    SharedPreferences prefs = PreferenceManager.getDefaultSharedPreferences(context);

    // Check if we need to update the previous hour
    long previousHourThreshold = prefs.getLong(PREF_PREVIOUS_HOUR_THRESHOLD, now);
    if (now >= previousHourThreshold) {
        // Download the previous hour
        from -= DataUtils.MILLIS_PER_HOUR;

        // Set threshold time to minute 15 of next hour
        Calendar calendar = Calendar.getInstance();
        calendar.setTimeInMillis(now);
        calendar.add(Calendar.HOUR, 1);
        calendar.set(Calendar.MINUTE, 15);
        calendar.set(Calendar.SECOND, 0);
        calendar.set(Calendar.MILLISECOND, 0);
        prefs.edit().putLong(PREF_PREVIOUS_HOUR_THRESHOLD, calendar.getTimeInMillis()).apply();
    }
    final long oldestTimestamp = DataUtils
            .floorTo60minutes(now - TaurusApplication.getNumberOfDaysToSync() * DataUtils.MILLIS_PER_DAY);

    // Check if we need to catch up and download old data
    if (database.getFirstTimestamp() > oldestTimestamp) {
        from = oldestTimestamp;
    }

    // Don't get data older than NUMBER_OF_DAYS_TO_SYNC
    from = Math.max(from, oldestTimestamp);

    // Blocking queue holding data waiting to be saved to the database.
    // This queue will be filled by the TaurusClient as it downloads data and it will be
    // emptied by the databaseTask as it saves data to the database
    final LinkedBlockingQueue<InstanceData> pending = new LinkedBlockingQueue<InstanceData>(
            PENDING_IO_BUFFER_SIZE);

    // Background task used save data to the database. This task will wait for data to arrive
    // from the server and save them to the database in batches until it finds the end of the
    // queue marked by DATA_EOF or it times out after 60 seconds
    final Future<?> databaseTask = getService().getIOThreadPool().submit(new Runnable() {
        @Override
        public void run() {
            // Save data in batches, one hour at a time
            final List<InstanceData> batch = new ArrayList<InstanceData>();
            int batchSize = -DataUtils.MILLIS_PER_HOUR;

            // Tracks the batch timestamp. Once the data timestamp falls below the batch
            // timestamp, the current batch is saved and a new one is started
            long batchTimestamp = now - DataUtils.MILLIS_PER_HOUR;

            try {
                // Process all pending data until the DATA_EOF is found or a timeout is reached
                InstanceData data;
                while ((data = pending.poll(60, TimeUnit.SECONDS)) != DATA_EOF && data != null) {
                    batch.add(data);
                    // Process batches
                    if (data.getTimestamp() < batchTimestamp) {
                        // Calculate next batch timestamp
                        batchTimestamp = data.getTimestamp() + batchSize;
                        if (database.addInstanceDataBatch(batch)) {
                            // Notify receivers new data has arrived
                            fireInstanceDataChangedEvent();
                        }
                        batch.clear();
                    }
                }
                // Last batch
                if (!batch.isEmpty()) {
                    if (database.addInstanceDataBatch(batch)) {
                        // Notify receivers new data has arrived
                        fireInstanceDataChangedEvent();
                    }
                }
            } catch (InterruptedException e) {
                Log.w(TAG, "Interrupted while loading data");
            }
        }
    });

    try {
        // Get new data from server
        Log.d(TAG, "Start downloading data from " + from);
        TaurusClient client = getClient();
        client.getAllInstanceData(new Date(from), new Date(now), false,
                new HTMClient.DataCallback<InstanceData>() {
                    @Override
                    public boolean onData(InstanceData data) {
                        // enqueue data for saving
                        try {
                            pending.put(data);
                        } catch (InterruptedException e) {
                            pending.clear();
                            Log.w(TAG, "Interrupted while loading data");
                            return false;
                        }
                        return true;
                    }
                });
        // Mark the end of the records
        pending.add(DATA_EOF);
        // Wait for the database task to complete
        databaseTask.get();
        // Clear client cache
        client.clearCache();
    } catch (InterruptedException e) {
        Log.w(TAG, "Interrupted while loading data");
    } catch (ExecutionException e) {
        Log.e(TAG, "Failed to load data", e);
    }
}

From source file: org.wso2.siddhi.extension.output.transport.http.HttpOutputTransport.java

@Override
public void init(String type, Map<String, String> options, Map<String, String> unmappedDynamicOptions) {
    if (executorService == null) {
        int minThread = (options.get(ADAPTER_MIN_THREAD_POOL_SIZE_NAME) != null)
                ? Integer.parseInt(options.get(ADAPTER_MIN_THREAD_POOL_SIZE_NAME))
                : ADAPTER_MIN_THREAD_POOL_SIZE;
        int maxThread = (options.get(ADAPTER_MAX_THREAD_POOL_SIZE_NAME) != null)
                ? Integer.parseInt(options.get(ADAPTER_MAX_THREAD_POOL_SIZE_NAME))
                : ADAPTER_MAX_THREAD_POOL_SIZE;
        long defaultKeepAliveTime = (options.get(ADAPTER_KEEP_ALIVE_TIME_NAME) != null)
                ? Integer.parseInt(options.get(ADAPTER_KEEP_ALIVE_TIME_NAME))
                : DEFAULT_KEEP_ALIVE_TIME_IN_MILLIS;
        int jobQueSize = (options.get(ADAPTER_EXECUTOR_JOB_QUEUE_SIZE_NAME) != null)
                ? Integer.parseInt(options.get(ADAPTER_EXECUTOR_JOB_QUEUE_SIZE_NAME))
                : ADAPTER_EXECUTOR_JOB_QUEUE_SIZE;
        int defaultMaxConnectionsPerHost = (options.get(DEFAULT_MAX_CONNECTIONS_PER_HOST) != null)
                ? Integer.parseInt(options.get(DEFAULT_MAX_CONNECTIONS_PER_HOST))
                : DEFAULT_DEFAULT_MAX_CONNECTIONS_PER_HOST;
        int maxTotalConnections = (options.get(MAX_TOTAL_CONNECTIONS) != null)
                ? Integer.parseInt(options.get(MAX_TOTAL_CONNECTIONS))
                : DEFAULT_MAX_TOTAL_CONNECTIONS;

        executorService = new ThreadPoolExecutor(minThread, maxThread, defaultKeepAliveTime,
                TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>(jobQueSize));

        connectionManager = new MultiThreadedHttpConnectionManager();
        connectionManager.getParams().setDefaultMaxConnectionsPerHost(defaultMaxConnectionsPerHost);
        connectionManager.getParams().setMaxTotalConnections(maxTotalConnections);
    }
}