Example usage for java.util.concurrent LinkedBlockingQueue LinkedBlockingQueue

Introduction

On this page you can find example usage for the java.util.concurrent LinkedBlockingQueue() no-argument constructor.

Prototype

public LinkedBlockingQueue() 

Document

Creates a LinkedBlockingQueue with a capacity of Integer.MAX_VALUE.
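
A minimal, self-contained sketch of this constructor in isolation (the class and variable names here are illustrative and not taken from any of the projects listed under Usage):

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class UnboundedQueueExample {
    public static void main(String[] args) throws InterruptedException {
        // The no-argument constructor creates a queue with capacity Integer.MAX_VALUE,
        // so put() effectively never blocks; take() blocks until an element is available.
        BlockingQueue<String> queue = new LinkedBlockingQueue<String>();
        queue.put("hello");
        System.out.println(queue.take()); // prints "hello"
    }
}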

Usage

From source file:com.opengamma.bbg.replay.BloombergTickWriter.java

/**
 * @param ticks the tick messages to distribute into per-security queues
 */
private void buildSecurityMapQueue(List<FudgeMsg> ticks) {
    for (FudgeMsg fudgeMsg : ticks) {
        String securityDes = fudgeMsg.getString(SECURITY_KEY);
        if (_securityMapQueue.containsKey(securityDes)) {
            BlockingQueue<FudgeMsg> queue = _securityMapQueue.get(securityDes);
            try {
                queue.put(fudgeMsg);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // restore the interrupt status
                s_logger.warn("interrupted while putting message on queue");
            }
        } else {
            LinkedBlockingQueue<FudgeMsg> queue = new LinkedBlockingQueue<FudgeMsg>();
            try {
                queue.put(fudgeMsg);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // restore the interrupt status
                s_logger.warn("interrupted while putting message on queue");
            }
            _securityMapQueue.put(securityDes, queue);
        }
    }
}
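
The containsKey/put sequence above is not atomic, so if this method were ever called from more than one thread, two queues could be created for the same security. A hedged alternative sketch, assuming _securityMapQueue can be declared as a ConcurrentMap (not confirmed by the original class):

private final ConcurrentMap<String, BlockingQueue<FudgeMsg>> _securityMapQueue = new ConcurrentHashMap<String, BlockingQueue<FudgeMsg>>();

private void buildSecurityMapQueue(List<FudgeMsg> ticks) {
    for (FudgeMsg fudgeMsg : ticks) {
        String securityDes = fudgeMsg.getString(SECURITY_KEY);
        // computeIfAbsent creates the per-security queue atomically on first use (Java 8+).
        BlockingQueue<FudgeMsg> queue = _securityMapQueue.computeIfAbsent(securityDes,
                key -> new LinkedBlockingQueue<FudgeMsg>());
        try {
            queue.put(fudgeMsg);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            s_logger.warn("interrupted while putting message on queue");
        }
    }
}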

From source file:com.kurento.kmf.content.internal.base.AbstractContentSession.java

public AbstractContentSession(ContentHandler<? extends ContentSession> handler, ContentSessionManager manager,
        AsyncContext asyncContext, String contentId) {
    state = STATE.IDLE;
    this.handler = handler;
    this.manager = manager;
    this.initialAsyncCtx = asyncContext;
    this.contentId = contentId;
    eventQueue = new LinkedBlockingQueue<Object>();
}

From source file:com.bittorrent.mpetazzoni.client.ConnectionHandler.java

/**
 * Start accepting new connections in a background thread.
 */
public void start() {
    if (this.channel == null) {
        throw new IllegalStateException("Connection handler cannot be recycled!");
    }

    this.stop = false;

    if (this.executor == null || this.executor.isShutdown()) {
        this.executor = new ThreadPoolExecutor(OUTBOUND_CONNECTIONS_POOL_SIZE, OUTBOUND_CONNECTIONS_POOL_SIZE,
                OUTBOUND_CONNECTIONS_THREAD_KEEP_ALIVE_SECS, TimeUnit.SECONDS,
                new LinkedBlockingQueue<Runnable>(), new ConnectorThreadFactory());
    }

    if (this.thread == null || !this.thread.isAlive()) {
        this.thread = new Thread(this);
        this.thread.setName("bt-serve");
        this.thread.start();
    }
}
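
Because the core and maximum pool sizes above are equal and the work queue is an unbounded LinkedBlockingQueue, this is the classic fixed-size pool configuration; a roughly equivalent sketch using the Executors factory method (the keep-alive value has no effect here, since core threads do not time out by default):

// Equivalent fixed-size pool, backed internally by an unbounded LinkedBlockingQueue.
ExecutorService executor = Executors.newFixedThreadPool(OUTBOUND_CONNECTIONS_POOL_SIZE,
        new ConnectorThreadFactory());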

From source file:de.micromata.genome.chronos.spi.SchedulerImpl.java

/**
 * Instantiates the thread pool.
 *
 * @param threadPoolSize the thread pool size
 * @param name the name
 */
private void initThreadPool(final int threadPoolSize, final String name) {
    queue = new LinkedBlockingQueue<Runnable>();
    this.threadPoolSize = threadPoolSize;

    int i = threadPoolSize;
    if (threadPoolSize == 0) {
        i = 1;
        GLog.warn(GenomeLogCategory.Scheduler, "ThreadPoolSize is given with 0: " + name);
    }
    // there must be at least one thread
    executor = new SchedulerThreadPoolExecutor(threadPoolSize, i, 1, TimeUnit.SECONDS, queue, this);
    SchedulerThreadFactory tfactory = new SchedulerThreadFactory();
    String appId = ChronosServiceManager.get().getSchedulerDAO().getShortApplicationName();
    String threadGroupName = "JCWTG[" + appId + "]: " + dispatcher.getDispatcherName() + "; " + name;
    ThreadGroup threadGroup = new ThreadGroup(dispatcher.getCreateDispatcherThreadGroup(), threadGroupName);
    tfactory.setThreadGroup(threadGroup);
    tfactory.setThreadNamePrefix("JCWT[" + appId + "]: " + dispatcher.getDispatcherName() + "; " + name);
    executor.setThreadFactory(tfactory);
}
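
Worth noting about this configuration: with an unbounded LinkedBlockingQueue, a ThreadPoolExecutor effectively never grows past its core size, because extra threads are only created when the queue refuses an offer. A hedged illustration of the conventional core == max setup (not taken from SchedulerImpl):

// Illustration only: with an unbounded queue, core and maximum sizes are usually kept equal.
BlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<Runnable>();
ThreadPoolExecutor pool = new ThreadPoolExecutor(4, 4, 1L, TimeUnit.SECONDS, workQueue);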

From source file:com.linkedin.pinot.transport.perf.ScatterGatherPerfClient.java

public ScatterGatherPerfClient(RoutingTableConfig config, int requestSize, String resourceName,
        boolean asyncRequestSubmit, int numRequests, int maxActiveConnections, int numReaderThreads) {
    _routingConfig = config;
    _reader = new BufferedReader(new InputStreamReader(System.in));
    StringBuilder s1 = new StringBuilder();
    for (int i = 0; i < requestSize; i++) {
        s1.append("a");
    }
    _request = s1.toString().getBytes();
    _resourceName = resourceName;
    _numRequests = numRequests;
    _asyncRequestSubmit = asyncRequestSubmit;
    _queue = new LinkedBlockingQueue<ScatterGatherPerfClient.QueueEntry>();
    _readerThreads = new ArrayList<AsyncReader>();
    if (asyncRequestSubmit) {
        for (int i = 0; i < numReaderThreads; i++) {
            _readerThreads.add(new AsyncReader(_queue, _latencyHistogram));
        }
    }
    _maxActiveConnections = maxActiveConnections;

    setup();
}

From source file:com.cloud.agent.Agent.java

public Agent(final IAgentShell shell) {
    _shell = shell;
    _link = null;

    _connection = new NioClient("Agent", _shell.getHost(), _shell.getPort(), _shell.getWorkers(), this);

    Runtime.getRuntime().addShutdownHook(new ShutdownThread(this));

    _ugentTaskPool = new ThreadPoolExecutor(shell.getPingRetries(), 2 * shell.getPingRetries(), 10,
            TimeUnit.MINUTES, new SynchronousQueue<Runnable>(), new NamedThreadFactory("UgentTask"));

    _executor = new ThreadPoolExecutor(_shell.getWorkers(), 5 * _shell.getWorkers(), 1, TimeUnit.DAYS,
            new LinkedBlockingQueue<Runnable>(), new NamedThreadFactory("agentRequest-Handler"));
}
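
The two pools here illustrate the two common queue choices: the urgent-task pool uses a SynchronousQueue (direct hand-off, so the pool grows toward its maximum under load and rejects work once every thread is busy), while the request-handler pool uses an unbounded LinkedBlockingQueue (work is buffered and the pool stays at its core size). A hedged side-by-side sketch with illustrative sizes, reusing the thread-factory names from the constructor above:

// Direct hand-off: grows up to the maximum pool size, then rejects further submissions.
ThreadPoolExecutor handoffPool = new ThreadPoolExecutor(2, 8, 10, TimeUnit.MINUTES,
        new SynchronousQueue<Runnable>(), new NamedThreadFactory("UgentTask"));

// Unbounded buffering: never grows past the core size; excess work waits in the queue.
ThreadPoolExecutor bufferedPool = new ThreadPoolExecutor(4, 4, 1, TimeUnit.DAYS,
        new LinkedBlockingQueue<Runnable>(), new NamedThreadFactory("agentRequest-Handler"));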

From source file:de.tu_dortmund.ub.data.util.TPUUtil.java

public static String executeInit(final String initResourceFile, final String serviceName,
        final Integer engineThreads, final Properties config, final int cnt) throws Exception {

    // create job
    final Callable<String> initTask = new Init(initResourceFile, config, cnt);

    // work on jobs
    final ThreadPoolExecutor pool = new ThreadPoolExecutor(engineThreads, engineThreads, 0L, TimeUnit.SECONDS,
            new LinkedBlockingQueue<>());

    try {

        final List<Callable<String>> tasks = new LinkedList<>();
        tasks.add(initTask);

        final List<Future<String>> futureList = pool.invokeAll(tasks);
        final Iterator<Future<String>> iterator = futureList.iterator();

        if (iterator.hasNext()) {

            final Future<String> f = iterator.next();

            final String initResult = f.get();

            final String message1 = String.format("[%s][%d] initResult = '%s'", serviceName, cnt, initResult);

            LOG.info(message1);

            return initResult;
        }

    } catch (final Exception e) {

        LOG.error("[{]][{}] something went wrong at init part execution", serviceName, cnt, e);

        throw e;
    } finally {

        pool.shutdown();
    }

    return null;
}
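
Since only a single Init task is submitted, the invokeAll machinery above could be reduced to one submit; a hedged, simplified sketch of the same flow (not the project's code):

final ExecutorService pool = Executors.newSingleThreadExecutor();
try {
    // Submit the single init job and block for its result.
    final Future<String> future = pool.submit(new Init(initResourceFile, config, cnt));
    return future.get();
} finally {
    pool.shutdown();
}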

From source file:eagle.jobrunning.crawler.RunningJobCrawlerImpl.java

private void startJobConfigProcessThread() {
    int configThreadCount = DEFAULT_CONFIG_THREAD_COUNT;
    LOG.info("Job Config crawler main thread started, pool size: " + DEFAULT_CONFIG_THREAD_COUNT);

    ThreadFactory factory = new ThreadFactory() {
        private final AtomicInteger count = new AtomicInteger(0);

        public Thread newThread(Runnable runnable) {
            count.incrementAndGet();
            Thread thread = Executors.defaultThreadFactory().newThread(runnable);
            thread.setName("config-crawler-workthread-" + count.get());
            return thread;
        }
    };

    ThreadPoolExecutor pool = new ThreadPoolExecutor(configThreadCount, configThreadCount, 0L,
            TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>(), factory);

    while (true) {
        JobContext context;
        try {
            context = queueOfConfig.take();
            LOG.info("queueOfConfig size: " + queueOfConfig.size());
            Runnable configCrawlerThread = new ConfigWorkTask(new JobContext(context), fetcher, callback, this);
            pool.execute(configCrawlerThread);
        } catch (InterruptedException e) {
            LOG.warn("Got an InterruptedException: " + e.getMessage());
        } catch (RejectedExecutionException e2) {
            LOG.warn("Got RejectedExecutionException: " + e2.getMessage());
        } catch (Throwable t) {
            LOG.warn("Got an throwable t, " + t.getMessage());
        }
    }
}
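
Because queueOfConfig.take() blocks indefinitely, the while (true) loop above has no orderly exit. A hedged sketch of a stoppable variant using poll with a timeout ('running' is an assumed volatile flag, not a field of the original class):

while (running) {
    try {
        JobContext context = queueOfConfig.poll(1, TimeUnit.SECONDS); // returns null on timeout
        if (context != null) {
            pool.execute(new ConfigWorkTask(new JobContext(context), fetcher, callback, this));
        }
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        break;
    }
}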

From source file:dk.netarkivet.harvester.indexserver.CrawlLogIndexCache.java

/** Combine a number of crawl.log files into one Lucene index.  This index
 * is placed as gzip files under the directory returned by getCacheFile().
 *
 * @param rawfiles The map from job ID into crawl.log contents. No
 * null values are allowed in this map.
 */
protected void combine(Map<Long, File> rawfiles) {
    indexingJobCount++;
    long datasetSize = rawfiles.values().size();
    log.info("Starting combine task #" + indexingJobCount + ". This combines a dataset with " + datasetSize
            + " crawl logs (thread = " + Thread.currentThread().getName() + ")");

    File resultDir = getCacheFile(rawfiles.keySet());
    Set<File> tmpfiles = new HashSet<File>();
    String indexLocation = resultDir.getAbsolutePath() + ".luceneDir";
    ThreadPoolExecutor executor = null;
    try {
        DigestIndexer indexer = createStandardIndexer(indexLocation);
        final boolean verboseIndexing = false;
        DigestOptions indexingOptions = new DigestOptions(this.useBlacklist, verboseIndexing, this.mimeFilter);
        long count = 0;
        Set<IndexingState> outstandingJobs = new HashSet<IndexingState>();
        final int maxThreads = Settings.getInt(HarvesterSettings.INDEXSERVER_INDEXING_MAXTHREADS);
        executor = new ThreadPoolExecutor(maxThreads, maxThreads, 0L, TimeUnit.MILLISECONDS,
                new LinkedBlockingQueue<Runnable>());

        executor.setRejectedExecutionHandler(new ThreadPoolExecutor.CallerRunsPolicy());

        for (Map.Entry<Long, File> entry : rawfiles.entrySet()) {
            Long jobId = entry.getKey();
            File crawlLog = entry.getValue();
            // Generate UUID to ensure a unique filedir for the index.
            File tmpFile = new File(FileUtils.getTempDir(), UUID.randomUUID().toString());
            tmpfiles.add(tmpFile);
            String localindexLocation = tmpFile.getAbsolutePath();
            Long cached = cdxcache.cache(jobId);
            if (cached == null) {
                log.warn("Skipping the ingest of logs for job " + entry.getKey()
                        + ". Unable to retrieve cdx-file for job.");
                continue;
            }
            File cachedCDXFile = cdxcache.getCacheFile(cached);

            // Dispatch this indexing task to a separate thread that 
            // handles the sorting of the logfiles and the generation
            // of a lucene index for this crawllog and cdxfile.
            count++;
            String taskID = count + " out of " + datasetSize;
            log.debug("Making subthread for indexing job " + jobId + " - task " + taskID);
            Callable<Boolean> task = new DigestIndexerWorker(localindexLocation, jobId, crawlLog, cachedCDXFile,
                    indexingOptions, taskID);
            Future<Boolean> result = executor.submit(task);
            outstandingJobs.add(new IndexingState(jobId, localindexLocation, result));
        }

        // wait for all the outstanding subtasks to complete.
        Set<Directory> subindices = new HashSet<Directory>();

        // Deadline for the combine-task
        long combineTimeout = Settings.getLong(HarvesterSettings.INDEXSERVER_INDEXING_TIMEOUT);
        long timeOutTime = System.currentTimeMillis() + combineTimeout;

        // The indexwriter for the totalindex.
        IndexWriter totalIndex = indexer.getIndex();
        int subindicesInTotalIndex = 0;
        // Max number of segments in totalindex.
        int maxSegments = Settings.getInt(HarvesterSettings.INDEXSERVER_INDEXING_MAX_SEGMENTS);

        final int ACCUMULATED_SUBINDICES_BEFORE_MERGING = 200;

        while (outstandingJobs.size() > 0) {
            log.info("Outstanding jobs in combine task #" + indexingJobCount + " is now "
                    + outstandingJobs.size());
            Iterator<IndexingState> iterator = outstandingJobs.iterator();
            if (timeOutTime < System.currentTimeMillis()) {
                log.warn("Max indexing time exceeded for one index ("
                        + TimeUtils.readableTimeInterval(combineTimeout) + "). Indexing stops here, although"
                        + " missing subindices for " + outstandingJobs.size() + " jobs");
                break;
            }
            while (iterator.hasNext() && subindices.size() < ACCUMULATED_SUBINDICES_BEFORE_MERGING) {
                Future<Boolean> nextResult;
                IndexingState next = iterator.next();
                if (next.getResultObject().isDone()) {
                    nextResult = next.getResultObject();
                    try {
                        // check, if the indexing failed
                        if (nextResult.get()) {
                            subindices.add(new SimpleFSDirectory(new File(next.getIndex())));
                        } else {
                            log.warn("Indexing of job " + next.getJobIdentifier() + " failed.");
                        }

                    } catch (InterruptedException e) {
                        log.warn("Unable to get Result back from " + "indexing thread", e);
                    } catch (ExecutionException e) {
                        log.warn("Unable to get Result back from " + "indexing thread", e);
                    }
                    //remove the done object from the set
                    iterator.remove();
                }
            }

            if (subindices.size() >= ACCUMULATED_SUBINDICES_BEFORE_MERGING) {

                log.info("Adding " + subindices.size()
                        + " subindices to main index. Forcing index to contain max " + maxSegments
                        + " files (related to combine task # " + indexingJobCount + ")");
                totalIndex.addIndexes(subindices.toArray(new Directory[0]));
                totalIndex.forceMerge(maxSegments);
                totalIndex.commit();
                for (Directory luceneDir : subindices) {
                    luceneDir.close();
                }
                subindicesInTotalIndex += subindices.size();
                log.info("Completed adding " + subindices.size() + " subindices to main index, now containing "
                        + subindicesInTotalIndex + " subindices" + "(related to combine task # "
                        + indexingJobCount + ")");
                subindices.clear();
            } else {
                sleepAwhile();
            }
        }

        log.info("Adding the final " + subindices.size()
                + " subindices to main index. Forcing index to contain max " + maxSegments + " files "
                + "(related to combine task # " + indexingJobCount + ")");

        totalIndex.addIndexes(subindices.toArray(new Directory[0]));
        totalIndex.forceMerge(maxSegments);
        totalIndex.commit();
        for (Directory luceneDir : subindices) {
            luceneDir.close();
        }
        subindices.clear();

        log.info("Adding operation completed (combine task # " + indexingJobCount + ")!");
        long docsInIndex = totalIndex.numDocs();

        indexer.close();
        log.info("Closed index (related to combine task # " + indexingJobCount);

        // Now the index is made, gzip it up.
        File totalIndexDir = new File(indexLocation);
        log.info("Gzip-compressing the individual " + totalIndexDir.list().length
                + " index files of combine task # " + indexingJobCount);
        ZipUtils.gzipFiles(totalIndexDir, resultDir);
        log.info("Completed combine task # " + indexingJobCount + " that combined a dataset with " + datasetSize
                + " crawl logs (entries in combined index: " + docsInIndex + ") - compressed index has size "
                + FileUtils.getHumanReadableFileSize(resultDir));
    } catch (IOException e) {
        throw new IOFailure("Error setting up crawl.log index framework for " + resultDir.getAbsolutePath(), e);
    } finally {
        // close down Threadpool-executor
        closeDownThreadpoolQuietly(executor);
        FileUtils.removeRecursively(new File(indexLocation));
        for (File temporaryFile : tmpfiles) {
            FileUtils.removeRecursively(temporaryFile);
        }
    }
}
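
One detail in the setup above: because the work queue is unbounded, execute() never rejects a task while the executor is running, so the CallerRunsPolicy only comes into play after shutdown. A hedged sketch of how a bounded queue would let the policy provide real backpressure (the capacity is illustrative):

// Sketch only: with a bounded queue, CallerRunsPolicy throttles the submitting thread once the queue fills up.
ThreadPoolExecutor boundedExecutor = new ThreadPoolExecutor(maxThreads, maxThreads, 0L,
        TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>(1000));
boundedExecutor.setRejectedExecutionHandler(new ThreadPoolExecutor.CallerRunsPolicy());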

From source file:com.datatorrent.lib.dedup.AbstractDeduper.java

public AbstractDeduper() {
    waitingEvents = Maps.newHashMap();
    partitionKeys = Sets.newHashSet(0);
    partitionMask = 0;

    fetchedBuckets = new LinkedBlockingQueue<AbstractBucket<INPUT>>();
    counters = new BasicCounters<MutableLong>(MutableLong.class);
}