Example usage for java.util.concurrent.LinkedBlockingQueue: the LinkedBlockingQueue(Collection<? extends E> c) constructor

Introduction

This page collects example usages of the LinkedBlockingQueue(Collection<? extends E> c) constructor from java.util.concurrent.LinkedBlockingQueue.

Prototype

public LinkedBlockingQueue(Collection<? extends E> c) 

Document

Creates a LinkedBlockingQueue with a capacity of Integer.MAX_VALUE, initially containing the elements of the given collection, added in traversal order of the collection's iterator.
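
A minimal sketch of this constructor in action (the demo class is illustrative, not from the page's sources):

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.LinkedBlockingQueue;

public class CollectionConstructorDemo {
    public static void main(String[] args) throws InterruptedException {
        List<String> seed = Arrays.asList("a", "b", "c");

        // Capacity defaults to Integer.MAX_VALUE; the elements are added
        // in the traversal order of the collection's iterator.
        LinkedBlockingQueue<String> queue = new LinkedBlockingQueue<>(seed);

        System.out.println(queue.take());              // "a" (iterator order)
        System.out.println(queue.remainingCapacity()); // Integer.MAX_VALUE - 2
    }
}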

Usage

From source file: org.green.code.async.executor.ThreadPoolTaskExecutor.java

/**
 * Create the BlockingQueue to use for the ThreadPoolExecutor.
 * <p>
 * A LinkedBlockingQueue instance will be created for a positive capacity
 * value; otherwise a SynchronousQueue will be used.
 * 
 * @param queueCapacity
 *            the specified queue capacity
 * @return the BlockingQueue instance
 * @see java.util.concurrent.LinkedBlockingQueue
 * @see java.util.concurrent.SynchronousQueue
 */
protected BlockingQueue<Runnable> createQueue(int queueCapacity) {
    if (queueCapacity > 0) {
        return new LinkedBlockingQueue<Runnable>(queueCapacity);
    } else {
        return new SynchronousQueue<Runnable>();
    }
}
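
The capacity check matters because a ThreadPoolExecutor grows past its core size only when the queue rejects an offer. A minimal, self-contained sketch of the difference (the demo class is illustrative):

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.SynchronousQueue;

public class QueueChoiceDemo {
    // Mirrors the createQueue logic above, reproduced for a standalone demo.
    static BlockingQueue<Runnable> createQueue(int queueCapacity) {
        return queueCapacity > 0
                ? new LinkedBlockingQueue<Runnable>(queueCapacity)
                : new SynchronousQueue<Runnable>();
    }

    public static void main(String[] args) {
        // A bounded LinkedBlockingQueue buffers tasks up to its capacity,
        // so offer() succeeds while there is room.
        System.out.println(createQueue(100).offer(() -> { }));  // true

        // A SynchronousQueue has no capacity: offer() fails unless a consumer
        // is already waiting, which pushes a ThreadPoolExecutor to create
        // threads up to maximumPoolSize instead of queueing.
        System.out.println(createQueue(0).offer(() -> { }));    // false
    }
}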

From source file: com.addthis.hydra.kafka.consumer.KafkaSource.java

@Override
public void init() {
    try {
        if (ignoreMarkDir) {
            File md = new File(markDir);
            if (md.exists()) {
                FileUtils.deleteDirectory(md);
                log.info("Deleted marks directory : {}", md);
            }
        }
        this.bundleQueue = new LinkedBlockingQueue<>(queueSize);
        this.markDb = new PageDB<>(LessFiles.initDirectory(markDir), SimpleMark.class, 100, 100);
        // move to init method
        this.fetchExecutor = new ThreadPoolExecutor(fetchThreads, fetchThreads, 0L, TimeUnit.SECONDS,
                new LinkedBlockingQueue<>(),
                new ThreadFactoryBuilder().setNameFormat("source-kafka-fetch-%d").setDaemon(true).build());
        this.decodeExecutor = new ThreadPoolExecutor(decodeThreads, decodeThreads, 0L, TimeUnit.SECONDS,
                new LinkedBlockingQueue<>(),
                new ThreadFactoryBuilder().setNameFormat("source-kafka-decode-%d").setDaemon(true).build());
        this.running = new AtomicBoolean(true);
        final DateTime startTime = (startDate != null) ? DateUtil.getDateTime(dateFormat, startDate) : null;

        zkClient = ZkUtil.makeStandardClient(zookeeper, false);
        TopicMetadata metadata = null;
        int metadataAttempt = 0;
        while (metadata == null && metadataAttempt < metadataRetries) {
            try {
                metadata = ConsumerUtils.getTopicMetadata(zkClient, seedBrokers, topic);
            } catch (Exception e) {
                log.error(
                        "failed to get kafka metadata (attempt {} / {}) for topic: {}, using brokers: {}, error: {}",
                        metadataAttempt, metadataRetries, topic, seedBrokers, e);
                Thread.sleep(metadataBackoff);
            }
            metadataAttempt++;
        }

        final Integer[] shards = config.calcShardList(metadata.partitionsMetadata().size());
        final ListBundleFormat bundleFormat = new ListBundleFormat();
        final CountDownLatch decodeLatch = new CountDownLatch(shards.length);
        for (final int shard : shards) {
            LinkedBlockingQueue<MessageWrapper> messageQueue = new LinkedBlockingQueue<>(this.queueSize);
            final PartitionMetadata partition = metadata.partitionsMetadata().get(shard);
            FetchTask fetcher = new FetchTask(this, topic, partition, startTime, messageQueue);
            fetchExecutor.execute(fetcher);
            Runnable decoder = new DecodeTask(decodeLatch, format, bundleFormat, running, messageQueue,
                    bundleQueue);
            decodeExecutor.execute(decoder);
        }
        decodeExecutor.submit(new MarkEndTask<>(decodeLatch, running, bundleQueue, bundleQueueEndMarker));
    } catch (Exception ex) {
        log.error("Error initializing kafka source: ", ex);
        throw new RuntimeException(ex);
    }
}
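
The bounded message and bundle queues above give the pipeline backpressure: a slow decode stage eventually blocks the fetch stage on put(). A stripped-down sketch of the same handoff pattern (all names here are illustrative, not taken from KafkaSource):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;

public class StageHandoffDemo {
    public static void main(String[] args) {
        // Tiny capacity so a slow consumer blocks the producer (backpressure).
        LinkedBlockingQueue<String> messageQueue = new LinkedBlockingQueue<>(2);
        ExecutorService pool = Executors.newFixedThreadPool(2);

        pool.execute(() -> {                          // "fetch" stage
            try {
                for (int i = 0; i < 5; i++) {
                    messageQueue.put("msg-" + i);     // blocks while the queue is full
                }
                messageQueue.put("EOF");              // end-of-stream marker
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });

        pool.execute(() -> {                          // "decode" stage
            try {
                String msg;
                while (!"EOF".equals(msg = messageQueue.take())) {
                    System.out.println("decoded " + msg);
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });

        pool.shutdown();                              // lets both stages finish
    }
}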

From source file: org.wso2.carbon.device.mgt.output.adapter.http.HTTPEventAdapter.java

@Override
public void init() throws OutputEventAdapterException {

    tenantId = PrivilegedCarbonContext.getThreadLocalCarbonContext().getTenantId();

    // ExecutorService will be assigned if it is null
    if (executorService == null) {
        int minThread;
        int maxThread;
        long defaultKeepAliveTime;
        int jobQueSize;

        // If global properties are available they will be used; otherwise the constant defaults apply
        if (globalProperties.get(HTTPEventAdapterConstants.ADAPTER_MIN_THREAD_POOL_SIZE_NAME) != null) {
            minThread = Integer.parseInt(
                    globalProperties.get(HTTPEventAdapterConstants.ADAPTER_MIN_THREAD_POOL_SIZE_NAME));
        } else {
            minThread = HTTPEventAdapterConstants.ADAPTER_MIN_THREAD_POOL_SIZE;
        }

        if (globalProperties.get(HTTPEventAdapterConstants.ADAPTER_MAX_THREAD_POOL_SIZE_NAME) != null) {
            maxThread = Integer.parseInt(
                    globalProperties.get(HTTPEventAdapterConstants.ADAPTER_MAX_THREAD_POOL_SIZE_NAME));
        } else {
            maxThread = HTTPEventAdapterConstants.ADAPTER_MAX_THREAD_POOL_SIZE;
        }

        if (globalProperties.get(HTTPEventAdapterConstants.ADAPTER_KEEP_ALIVE_TIME_NAME) != null) {
            defaultKeepAliveTime = Integer
                    .parseInt(globalProperties.get(HTTPEventAdapterConstants.ADAPTER_KEEP_ALIVE_TIME_NAME));
        } else {
            defaultKeepAliveTime = HTTPEventAdapterConstants.DEFAULT_KEEP_ALIVE_TIME_IN_MILLIS;
        }

        if (globalProperties.get(HTTPEventAdapterConstants.ADAPTER_EXECUTOR_JOB_QUEUE_SIZE_NAME) != null) {
            jobQueSize = Integer.parseInt(
                    globalProperties.get(HTTPEventAdapterConstants.ADAPTER_EXECUTOR_JOB_QUEUE_SIZE_NAME));
        } else {
            jobQueSize = HTTPEventAdapterConstants.ADAPTER_EXECUTOR_JOB_QUEUE_SIZE;
        }
        executorService = new ThreadPoolExecutor(minThread, maxThread, defaultKeepAliveTime,
                TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>(jobQueSize));

        //configurations for the httpConnectionManager which will be shared by every http adapter
        int defaultMaxConnectionsPerHost;
        int maxTotalConnections;

        if (globalProperties.get(HTTPEventAdapterConstants.DEFAULT_MAX_CONNECTIONS_PER_HOST) != null) {
            defaultMaxConnectionsPerHost = Integer
                    .parseInt(globalProperties.get(HTTPEventAdapterConstants.DEFAULT_MAX_CONNECTIONS_PER_HOST));
        } else {
            defaultMaxConnectionsPerHost = HTTPEventAdapterConstants.DEFAULT_DEFAULT_MAX_CONNECTIONS_PER_HOST;
        }

        if (globalProperties.get(HTTPEventAdapterConstants.MAX_TOTAL_CONNECTIONS) != null) {
            maxTotalConnections = Integer
                    .parseInt(globalProperties.get(HTTPEventAdapterConstants.MAX_TOTAL_CONNECTIONS));
        } else {
            maxTotalConnections = HTTPEventAdapterConstants.DEFAULT_MAX_TOTAL_CONNECTIONS;
        }

        connectionManager = new MultiThreadedHttpConnectionManager();
        connectionManager.getParams().setDefaultMaxConnectionsPerHost(defaultMaxConnectionsPerHost);
        connectionManager.getParams().setMaxTotalConnections(maxTotalConnections);
    }
}
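
With a bounded job queue and no explicit RejectedExecutionHandler, the executor keeps its default AbortPolicy: once maxThread workers are busy and jobQueSize tasks are queued, further submissions throw RejectedExecutionException. A small sketch of that failure mode (the sizes and class name are illustrative):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class BoundedQueueRejectionDemo {
    public static void main(String[] args) {
        CountDownLatch block = new CountDownLatch(1);
        // One worker and a one-slot queue, an exaggerated version of the pool above.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(1, 1, 0L,
                TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>(1));
        Runnable waiter = () -> {
            try { block.await(); } catch (InterruptedException ignored) { }
        };

        pool.execute(waiter);       // occupies the single worker thread
        pool.execute(waiter);       // fills the one queue slot
        try {
            pool.execute(waiter);   // no thread free, no queue space -> rejected
        } catch (RejectedExecutionException expected) {
            System.out.println("third task rejected by the default AbortPolicy");
        }

        block.countDown();          // release the workers
        pool.shutdown();
    }
}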

From source file: nu.nethome.home.impl.HomeServer.java

public HomeServer() {
    eventQueue = new LinkedBlockingQueue<>(MAX_QUEUE_SIZE);
    logRecords = new LinkedBlockingDeque<>(LOG_RECORD_CAPACITY);
    setupLogger();
    eventCountlogger.activate(this);
    commandLineExecutor = new CommandLineExecutor(this, true);
    python = new Python();
}

From source file: org.yamj.core.service.ScanningScheduler.java

@Scheduled(initialDelay = 10000, fixedDelay = 45000)
public void scanPeopleData() throws Exception {
    int maxThreads = configService.getIntProperty("yamj3.scheduler.peoplescan.maxThreads", 1);
    if (maxThreads <= 0) {
        if (!messageDisabledPeople) {
            messageDisabledPeople = Boolean.TRUE;
            LOG.info("People scanning is disabled");
        }
        return;
    } else {
        messageDisabledPeople = Boolean.FALSE;
    }

    int maxResults = configService.getIntProperty("yamj3.scheduler.peoplescan.maxResults", 50);
    List<QueueDTO> queueElements = metadataStorageService.getPersonQueueForScanning(maxResults);
    if (CollectionUtils.isEmpty(queueElements)) {
        LOG.debug("No people data found to scan");
        return;
    }

    LOG.info("Found {} people objects to process; scan with {} threads", queueElements.size(), maxThreads);
    BlockingQueue<QueueDTO> queue = new LinkedBlockingQueue<QueueDTO>(queueElements);

    ExecutorService executor = Executors.newFixedThreadPool(maxThreads);
    for (int i = 0; i < maxThreads; i++) {
        PluginMetadataRunner worker = new PluginMetadataRunner(queue, pluginMetadataService);
        executor.execute(worker);
    }
    executor.shutdown();

    // run until all workers have finished
    while (!executor.isTerminated()) {
        try {
            TimeUnit.SECONDS.sleep(5);
        } catch (InterruptedException ignore) {
        }
    }

    LOG.debug("Finished people data scanning");
}
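
This example uses the collection constructor from the prototype above: the queue starts pre-filled with queueElements and the workers drain it with poll() until it returns null. A minimal version of the same seed-and-drain pattern, shown with awaitTermination in place of the sleep-and-check loop (names are illustrative):

import java.util.Arrays;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

public class SeedAndDrainDemo {
    public static void main(String[] args) throws InterruptedException {
        // Queue pre-filled from a collection, as in the prototype.
        BlockingQueue<String> queue =
                new LinkedBlockingQueue<>(Arrays.asList("p1", "p2", "p3", "p4"));

        ExecutorService executor = Executors.newFixedThreadPool(2);
        for (int i = 0; i < 2; i++) {
            executor.execute(() -> {
                String item;
                // poll() returns null once the seeded work runs out.
                while ((item = queue.poll()) != null) {
                    System.out.println(Thread.currentThread().getName() + " scanned " + item);
                }
            });
        }
        executor.shutdown();
        // Blocks until the workers finish; no sleep-and-check loop needed.
        executor.awaitTermination(1, TimeUnit.MINUTES);
    }
}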

From source file: com.ebay.jetstream.application.JetstreamApplication.java

/**
 * Override this method to do initialization in a custom JetstreamApplication.
 *
 * @throws Exception
 */
protected void init() throws Exception {
    JetstreamApplicationInformation ai = getApplicationInformation();
    ai.selfLocate();

    m_workQueue = new LinkedBlockingQueue<Runnable>(ai.getWorkQeueSz());
    m_worker = new ThreadPoolExecutor(ai.getWorkerThreads(), 3, 30, TimeUnit.SECONDS, m_workQueue,
            new ThreadPoolExecutor.CallerRunsPolicy());
    m_worker.prestartCoreThread();

    Management.addBean(ai.getApplicationName(), this);
    logInfo("Starting services for " + ai);
    String[] configs = ai.getConfigRoots();
    RootConfiguration rc = configs == null ? new RootConfiguration(ai) : new RootConfiguration(ai, configs);
    rc.start();
    String[] sa = ai.getBeans();
    if (sa != null)
        for (String bean : sa)
            rc.getBean(bean);
}
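
Here the bounded work queue is paired with CallerRunsPolicy, so when the queue fills up the submitting thread runs the task itself, throttling producers instead of throwing. An exaggerated sketch of that behavior (pool and queue sizes are illustrative):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class CallerRunsDemo {
    public static void main(String[] args) {
        // One worker and a one-slot queue, so the policy triggers quickly.
        ThreadPoolExecutor worker = new ThreadPoolExecutor(1, 1, 30L, TimeUnit.SECONDS,
                new LinkedBlockingQueue<Runnable>(1),
                new ThreadPoolExecutor.CallerRunsPolicy());

        for (int i = 0; i < 3; i++) {
            final int n = i;
            // Task 0 goes to the worker, task 1 to the queue slot; task 2 is
            // rejected and therefore runs on the main (caller) thread.
            worker.execute(() -> {
                try { Thread.sleep(100); } catch (InterruptedException ignored) { }
                System.out.println("task " + n + " ran on " + Thread.currentThread().getName());
            });
        }
        worker.shutdown();
    }
}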

From source file: org.apache.apex.malhar.lib.db.jdbc.AbstractJdbcPollInputOperator.java

@Override
public void setup(OperatorContext context) {
    super.setup(context);
    dslContext = createDSLContext();
    if (scanService == null) {
        scanService = Executors.newScheduledThreadPool(1);
    }
    execute = true;
    emitQueue = new LinkedBlockingQueue<>(queueCapacity);
    windowManager.setup(context);
}

From source file: com.alibaba.otter.manager.biz.monitor.impl.GlobalMonitor.java

@Override
public void afterPropertiesSet() throws Exception {
    nThreads = nThreads <= 0 ? DEFAULT_THREADS : nThreads;
    executor = new ThreadPoolExecutor(nThreads, nThreads, 0, TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<Runnable>(nThreads * 2), new NamedThreadFactory("global monitor", false),
            new ThreadPoolExecutor.CallerRunsPolicy());

}

From source file: disko.flow.analyzers.FullRelexAnalyzer.java

public void process(AnalysisContext<TextDocument> ctx, Ports ports) throws InterruptedException {
    if (pool == null)
        init();
    final InputPort<EntityMaintainer> inputPort = ports.getInput(EntityAnalyzer.ENTITY_CHANNEL);
    final OutputPort<RelexTaskResult> outputPort = ports.getOutput(PARSE_CHANNEL);
    final LinkedBlockingQueue<Future<RelexTaskResult>> futureResults = new LinkedBlockingQueue<Future<RelexTaskResult>>(
            outputPort.getChannel().getCapacity());
    log.debug("Starting LinkGrammarAnalyzer...");
    exec.submit(new Callable<Integer>() {
        public Integer call() throws Exception {
            try {
                log.debug("LinkGrammarAnalyzer from channel + " + inputPort.getChannel());
                for (EntityMaintainer em = inputPort.take(); !inputPort.isEOS(em); em = inputPort.take())
                    submitTask(em, futureResults);
            } catch (Throwable t) {
                log.error("Unable to submit parsing task.", t);
            } finally {
                futureResults.put(new FutureRelexTaskResultEOS());
            }
            return (futureResults.size() - 1);
        }
    });

    try {
        while (true) {
            try {
                Future<RelexTaskResult> futureResult = futureResults.take();
                RelexTaskResult relexTaskResult = futureResult.get();
                if (relexTaskResult == null)
                    break;
                log.debug("LinkGrammarAnalyzer received " + relexTaskResult.index + ": "
                        + relexTaskResult.result.getParses().size() + " parses of sentences "
                        + relexTaskResult.sentence);
                relexTaskResult.result.setSentence(relexTaskResult.entityMaintainer.getOriginalSentence());
                outputPort.put(relexTaskResult);
            } catch (InterruptedException e) {
                for (Future<RelexTaskResult> future : futureResults) {
                    try {
                        future.cancel(true);
                    } catch (Throwable t) {
                        log.error(t);
                    }
                }
                break;
            }
        }
        for (Future<RelexTaskResult> future : futureResults) {
            future.cancel(true);
        }
    } catch (ExecutionException e) {
        throw new RuntimeException(e);
    } finally {
        outputPort.close();
        /*
         * exec.shutdown(); for (RelexContext context: pool){
         * context.getLinkParserClient().close(); }
         */
        destroy();
    }
}
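
The futureResults queue above preserves submission order because the consumer loop calls get() on each Future in turn. When completion order is acceptable instead, java.util.concurrent.ExecutorCompletionService packages the same queue-of-futures pattern and is backed by an unbounded LinkedBlockingQueue by default. A minimal sketch:

import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class CompletionServiceDemo {
    public static void main(String[] args) throws InterruptedException, ExecutionException {
        ExecutorService exec = Executors.newFixedThreadPool(4);
        // Completed Futures land on an internal LinkedBlockingQueue.
        ExecutorCompletionService<String> ecs = new ExecutorCompletionService<>(exec);

        for (int i = 0; i < 4; i++) {
            final int n = i;
            ecs.submit(() -> "parse-" + n);
        }
        for (int i = 0; i < 4; i++) {
            Future<String> done = ecs.take();   // futures arrive in completion order
            System.out.println(done.get());
        }
        exec.shutdown();
    }
}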