Example usage for java.util.concurrent.ArrayBlockingQueue: the ArrayBlockingQueue(int capacity) constructor

Introduction

On this page you can find example usage for the ArrayBlockingQueue(int capacity) constructor of java.util.concurrent.ArrayBlockingQueue.

Prototype

public ArrayBlockingQueue(int capacity) 

Document

Creates an ArrayBlockingQueue with the given (fixed) capacity and default access policy.
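
A minimal, self-contained sketch of this constructor (class name and values are illustrative only): the queue holds at most capacity elements, put blocks while the queue is full, offer returns false instead of blocking, and take blocks while the queue is empty.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

public class BoundedQueueDemo {
    public static void main(String[] args) throws InterruptedException {
        // Fixed capacity of 2 and the default (non-fair) access policy.
        BlockingQueue<String> queue = new ArrayBlockingQueue<>(2);

        queue.put("a");                      // succeeds immediately
        queue.put("b");                      // fills the queue
        boolean accepted = queue.offer("c"); // returns false instead of blocking
        System.out.println("offer accepted: " + accepted);

        System.out.println(queue.take());    // "a"
        System.out.println(queue.take());    // "b"
    }
}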

Usage

From source file:at.sti2.sparkwave.SparkwaveKernel.java

/**
 * Starts all relevant threads for a pattern and keeps track of them
 * @param pattern
 */
public void addProcessorThread(Pattern pattern) {

    //Create SparkwaveNetwork
    SparkwaveNetwork sparkwaveNetwork = new SparkwaveNetwork(pattern);
    sparkwaveNetwork.init();

    //Every pattern gets its own queue
    BlockingQueue<Triple> queue = new ArrayBlockingQueue<Triple>(10);
    queues.add(queue);

    //Create SparkwaveProcessorThread
    ProcessorThread sparkwaveProcessor = new ProcessorThread(sparkwaveNetwork, queue);
    Thread thread = new Thread(sparkwaveProcessor);
    thread.setName("Processor-" + thread.getName());
    thread.start();

    patternThreadMap.put(pattern, thread);
    patternQueueMap.put(pattern, queue);
    idPatternMap.put(pattern.getId(), pattern);

}

From source file:ubic.gemma.loader.genome.gene.ncbi.NcbiGeneLoader.java

/**
 * @param geneInfoFile
 * @param gene2AccFile
 * @param geneHistoryFile
 * @param geneEnsemblFile
 * @param supportedTaxa can be null if we just want everything
 */
private void load(String geneInfoFile, String gene2AccFile, String geneHistoryFile, String geneEnsemblFile,
        Collection<Taxon> supportedTaxa) {
    /*
     * In case this is reused.
     */
    this.generatorDone.set(false);
    this.converterDone.set(false);
    this.loaderDone.set(false);

    NcbiGeneDomainObjectGenerator sdog = new NcbiGeneDomainObjectGenerator(supportedTaxa);
    sdog.setDoDownload(doDownload);
    sdog.setProducerDoneFlag(generatorDone);
    sdog.setStartingNcbiId(startingNcbiId);

    NcbiGeneConverter converter = new NcbiGeneConverter();
    converter.setSourceDoneFlag(generatorDone);
    converter.setProducerDoneFlag(converterDone);

    // create queue for GeneInfo objects
    final BlockingQueue<NcbiGeneData> geneInfoQueue = new ArrayBlockingQueue<NcbiGeneData>(QUEUE_SIZE);
    final BlockingQueue<Gene> geneQueue = new ArrayBlockingQueue<Gene>(QUEUE_SIZE);

    // Threaded producer - loading files into queue as GeneInfo objects
    if (StringUtils.isEmpty(geneInfoFile) || StringUtils.isEmpty(gene2AccFile)) {
        sdog.generate(geneInfoQueue);
    } else {
        sdog.generateLocal(geneInfoFile, gene2AccFile, geneHistoryFile, geneEnsemblFile, geneInfoQueue);
    }

    // Threaded consumer/producer - consumes GeneInfo objects and generates
    // Gene/GeneProduct/DatabaseEntry entries
    converter.convert(geneInfoQueue, geneQueue);

    // Threaded consumer. Consumes Gene objects and persists them into
    // the database
    this.load(geneQueue);

    // update taxon table to indicate that now there are genes loaded for that taxa.
    // all or nothing so that if fails for some taxa then no taxa will be updated.
    this.updateTaxaWithGenesUsable(sdog.getSupportedTaxaWithNCBIGenes());
}
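
The loader above hands work between threaded stages through bounded ArrayBlockingQueue instances: when a downstream stage falls behind, put blocks and throttles the upstream stage. A minimal two-thread sketch of that hand-off pattern, using a sentinel value to signal completion (all names below are illustrative and not taken from the source above):

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

public class PipelineSketch {
    private static final String POISON = "__DONE__"; // sentinel marking the end of input

    public static void main(String[] args) throws InterruptedException {
        BlockingQueue<String> queue = new ArrayBlockingQueue<>(100);

        Thread producer = new Thread(() -> {
            try {
                for (int i = 0; i < 1000; i++) {
                    queue.put("item-" + i); // blocks when the consumer falls behind
                }
                queue.put(POISON);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });

        Thread consumer = new Thread(() -> {
            try {
                while (true) {
                    String item = queue.take();
                    if (POISON.equals(item)) {
                        break;
                    }
                    // convert/persist the item here
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });

        producer.start();
        consumer.start();
        producer.join();
        consumer.join();
    }
}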

From source file:com.pinterest.rocksplicator.controller.DispatcherTest.java

@Test
public void testRetryTask() throws Exception {
    final String errorMsg = "Boom!!!";
    TaskBase task = new ThrowingTask(errorMsg).retry(3).getEntity();
    final CountDownLatch latch = new CountDownLatch(3);
    FIFOTaskQueue tq = new FIFOTaskQueue(10) {
        @Override
        public boolean finishTask(final long id, final String output) {
            latch.countDown();
            return super.finishTask(id, output);
        }

        @Override
        public long finishTaskAndEnqueueRunningTask(final long id, final String output, final TaskBase newTask,
                final String worker) {
            latch.countDown();
            return super.finishTaskAndEnqueueRunningTask(id, output, newTask, worker);
        }
    };
    tq.enqueueTask(task, Integer.toString(++nameCounter), 0);

    Semaphore idleWorkersSemaphore = new Semaphore(2);
    ThreadPoolExecutor threadPoolExecutor = new ThreadPoolExecutor(2, 2, 0, TimeUnit.SECONDS,
            new ArrayBlockingQueue<Runnable>(2));
    WorkerPool workerPool = new WorkerPool(threadPoolExecutor, idleWorkersSemaphore, tq);
    TaskDispatcher dispatcher = new TaskDispatcher(2, idleWorkersSemaphore, workerPool, tq);
    dispatcher.start();

    Assert.assertTrue(latch.await(30, TimeUnit.SECONDS));
    Assert.assertEquals(tq.getResult(0), errorMsg);
    Assert.assertEquals(tq.getResult(1), errorMsg);
    Assert.assertEquals(tq.getResult(2), errorMsg);
    dispatcher.stop();
}

From source file:nl.uva.sne.disambiguators.Wikidata.java

private Map<String, List<String>> getbroaderIDS(Set<Term> terms)
        throws MalformedURLException, InterruptedException, ExecutionException {
    Map<String, List<String>> map = new HashMap<>();
    if (terms.size() > 0) {
        int maxT = 2;
        BlockingQueue<Runnable> workQueue = new ArrayBlockingQueue<>(maxT);
        ExecutorService pool = new ThreadPoolExecutor(maxT, maxT, 500L, TimeUnit.MICROSECONDS, workQueue);
        //            ExecutorService pool = new ThreadPoolExecutor(maxT, maxT,
        //                    5000L, TimeUnit.MILLISECONDS,
        //                    new ArrayBlockingQueue<>(maxT, true), new ThreadPoolExecutor.CallerRunsPolicy());

        Set<Future<Map<String, List<String>>>> set1 = new HashSet<>();
        String prop = "P31";
        for (Term t : terms) {
            URL url = new URL(
                    page + "?action=wbgetclaims&format=json&props=&property=" + prop + "&entity=" + t.getUID());
            System.err.println(url);
            WikiRequestor req = new WikiRequestor(url, t.getUID(), 1);
            Future<Map<String, List<String>>> future = pool.submit(req);
            set1.add(future);
        }
        pool.shutdown();

        for (Future<Map<String, List<String>>> future : set1) {
            while (!future.isDone()) {
                //                Logger.getLogger(Wikipedia.class.getName()).log(Level.INFO, "Task is not completed yet....");
                Thread.sleep(10);
            }
            Map<String, List<String>> c = future.get();
            if (c != null) {
                map.putAll(c);
            }
        }
    }

    return map;
}
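
The example above also shows ArrayBlockingQueue serving as the bounded work queue of a ThreadPoolExecutor. When the queue is full and all threads are busy, further submissions are rejected with RejectedExecutionException unless a handler such as ThreadPoolExecutor.CallerRunsPolicy is installed (as in the commented-out alternative above). A minimal sketch with illustrative sizes:

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class BoundedExecutorSketch {
    public static void main(String[] args) throws InterruptedException {
        int poolSize = 2;
        // Bounded work queue: at most 2 tasks wait while both threads are busy.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(poolSize, poolSize, 0L, TimeUnit.MILLISECONDS,
                new ArrayBlockingQueue<>(2),
                new ThreadPoolExecutor.CallerRunsPolicy()); // overflow runs on the submitting thread

        for (int i = 0; i < 10; i++) {
            final int taskId = i;
            pool.execute(() -> System.out.println("task " + taskId + " on " + Thread.currentThread().getName()));
        }

        pool.shutdown();
        pool.awaitTermination(1, TimeUnit.MINUTES);
    }
}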

From source file:org.mitre.mpf.mst.TestSystemStress.java

@Test(timeout = 180 * MINUTES)
public void runFaceOcvDetectImageManyJobs() throws Exception {
    testCtr++;
    log.info("Beginning test #{} runFaceOcvDetectImageManyJobs()", testCtr);
    IOFileFilter fileFilter = FileFilterUtils.and(FileFilterUtils.fileFileFilter(),
            FileFilterUtils.suffixFileFilter(".jpg"));

    int numExtractors = 6; // number of extractors on Jenkins (* number of nodes, now 1)
    //        int numExtractors = 2;  // number of extractors on local VM * 1 node

    // for testing on local VM only
    //        Collection<File> files = FileUtils.listFiles(new File(getClass().getClassLoader().getResource("samples/face").getFile()),
    //            fileFilter, null);

    // for testing on Jenkins
    // 10,000 jpgs
    Collection<File> files = FileUtils.listFiles(new File("/mpfdata/datasets/mugshots_10000"), fileFilter,
            null);

    BlockingQueue<File> fQueue = new ArrayBlockingQueue<File>(files.size());
    for (File file : files) {
        fQueue.put(file);
    }
    ExecutorService executor = Executors.newFixedThreadPool(numExtractors);
    JobRunner[] jobRunners = new JobRunner[numExtractors];
    for (int i = 0; i < numExtractors; i++) {
        jobRunners[i] = new JobRunner(fQueue);
        executor.submit(jobRunners[i]);
    }
    executor.shutdown();
    executor.awaitTermination(Long.MAX_VALUE, TimeUnit.MILLISECONDS);

    Assert.assertEquals("Number of files to process doesn't match actual number of jobs run (one job/file):",
            files.size(), manyJobsNumFilesProcessed);
    log.info("Successfully ran {} jobs for {} files, one file per job.", manyJobsNumFilesProcessed,
            files.size());
    log.info("Finished test runFaceOcvDetectImageManyJobs()");
}

From source file:org.apache.hadoop.hbase.io.hfile.bucket.BucketCache.java

public BucketCache(String ioEngineName, long capacity, int blockSize, int writerThreadNum, int writerQLen,
        String persistencePath, int ioErrorsTolerationDuration) throws FileNotFoundException, IOException {
    this.ioEngine = getIOEngineFromName(ioEngineName, capacity);
    this.writerThreads = new WriterThread[writerThreadNum];
    this.cacheWaitSignals = new Object[writerThreadNum];
    long blockNumCapacity = capacity / blockSize;
    if (blockNumCapacity >= Integer.MAX_VALUE) {
        // Enough for about 32TB of cache!
        throw new IllegalArgumentException("Cache capacity is too large, only support 32TB now");
    }

    this.cacheCapacity = capacity;
    this.persistencePath = persistencePath;
    this.blockSize = blockSize;
    this.ioErrorsTolerationDuration = ioErrorsTolerationDuration;

    bucketAllocator = new BucketAllocator(capacity);
    for (int i = 0; i < writerThreads.length; ++i) {
        writerQueues.add(new ArrayBlockingQueue<RAMQueueEntry>(writerQLen));
        this.cacheWaitSignals[i] = new Object();
    }

    assert writerQueues.size() == writerThreads.length;
    this.ramCache = new ConcurrentHashMap<BlockCacheKey, RAMQueueEntry>();

    this.backingMap = new ConcurrentHashMap<BlockCacheKey, BucketEntry>((int) blockNumCapacity);

    if (ioEngine.isPersistent() && persistencePath != null) {
        try {
            retrieveFromFile();
        } catch (IOException ioex) {
            LOG.error("Can't restore from file because of", ioex);
        } catch (ClassNotFoundException cnfe) {
            LOG.error("Can't restore from file in rebuild because can't deserialise", cnfe);
            throw new RuntimeException(cnfe);
        }
    }
    final String threadName = Thread.currentThread().getName();
    this.cacheEnabled = true;
    for (int i = 0; i < writerThreads.length; ++i) {
        writerThreads[i] = new WriterThread(writerQueues.get(i), i);
        writerThreads[i].setName(threadName + "-BucketCacheWriter-" + i);
        writerThreads[i].start();
    }
    // Run the statistics thread periodically to print the cache statistics log
    this.scheduleThreadPool.scheduleAtFixedRate(new StatisticsThread(this), statThreadPeriod, statThreadPeriod,
            TimeUnit.SECONDS);
    LOG.info("Started bucket cache; ioengine=" + ioEngineName + ", capacity=" + StringUtils.byteDesc(capacity)
            + ", blockSize=" + StringUtils.byteDesc(blockSize) + ", writerThreadNum=" + writerThreadNum
            + ", writerQLen=" + writerQLen + ", persistencePath=" + persistencePath);
}

From source file:com.nridge.connector.ws.con_ws.task.TaskConnectorWS.java

/**
  * If this task is scheduled to be executed (e.g. its run/test
  * name matches the command line arguments), then this method
  * is guaranteed to be executed prior to the thread being
  * started.
  *
  * @param anAppMgr Application manager instance.
  *
  * @throws com.nridge.core.base.std.NSException Application specific exception.
  */
@Override
public void init(AppMgr anAppMgr) throws NSException {
    mAppMgr = anAppMgr;
    Logger appLogger = mAppMgr.getLogger(this, "init");

    appLogger.trace(mAppMgr.LOGMSG_TRACE_ENTER);

    mIsAlive = new AtomicBoolean(false);

    // Write our configuration properties for troubleshooting purposes.

    mAppMgr.writeCfgProperties(appLogger);

    // Assign our between crawl sleep time.

    mSleepTimeInMinutes = 15;
    String sleepTimeString = mAppMgr.getString(Constants.CFG_PROPERTY_PREFIX + ".run_sleep_between");
    if (StringUtils.endsWithIgnoreCase(sleepTimeString, "m")) {
        String minuteString = StringUtils.stripEnd(sleepTimeString, "m");
        if ((StringUtils.isNotEmpty(minuteString)) && (StringUtils.isNumeric(minuteString)))
            mSleepTimeInMinutes = Integer.parseInt(minuteString);
    } else if ((StringUtils.isNotEmpty(sleepTimeString)) && (StringUtils.isNumeric(sleepTimeString)))
        mSleepTimeInMinutes = Integer.parseInt(sleepTimeString);

    // The extract queue holds documents that have been extracted from the content source.

    int extractQueueSize = mAppMgr.getInt(Constants.CFG_PROPERTY_PREFIX + ".extract.queue_length",
            Connector.QUEUE_LENGTH_DEFAULT);
    BlockingQueue extractQueue = new ArrayBlockingQueue(extractQueueSize);
    mAppMgr.addProperty(Connector.QUEUE_EXTRACT_NAME, extractQueue);

    // The transform queue holds documents that have been transformed after extraction.

    int transformQueueSize = mAppMgr.getInt(Constants.CFG_PROPERTY_PREFIX + ".transform.queue_length",
            Connector.QUEUE_LENGTH_DEFAULT);
    BlockingQueue transformQueue = new ArrayBlockingQueue(transformQueueSize);
    mAppMgr.addProperty(Connector.QUEUE_TRANSFORM_NAME, transformQueue);

    // The publish queue holds documents that have been published to the search index.

    int publishQueueSize = mAppMgr.getInt(Constants.CFG_PROPERTY_PREFIX + ".publish.queue_length",
            Connector.QUEUE_LENGTH_DEFAULT);
    BlockingQueue publishQueue = new ArrayBlockingQueue(publishQueueSize);
    mAppMgr.addProperty(Connector.QUEUE_PUBLISH_NAME, publishQueue);

    // Load our schema definition from the data source folder.

    DataBag schemaBag;
    String schemaPathFileName = String.format("%s%c%s", mAppMgr.getString(mAppMgr.APP_PROPERTY_DS_PATH),
            File.separatorChar, Constants.SCHEMA_FILE_NAME);
    DataBagXML dataBagXML = new DataBagXML();
    try {
        dataBagXML.load(schemaPathFileName);
        schemaBag = dataBagXML.getBag();
    } catch (Exception e) {
        String msgStr = String.format("%s: %s", schemaPathFileName, e.getMessage());
        appLogger.error(msgStr);
        appLogger.warn("Using internal document schema as alternative - data source schema ignored.");
        schemaBag = schemaBag();
    }

    mAppMgr.addProperty(Connector.PROPERTY_SCHEMA_NAME, schemaBag);

    // Create our mail manager instance.

    MailManager mailManager = new MailManager(mAppMgr, Constants.CFG_PROPERTY_PREFIX + ".mail");
    mAppMgr.addProperty(Connector.PROPERTY_MAIL_NAME, mailManager);

    // Create/Load service time tracking file.

    mServiceTimer = new ServiceTimer(mAppMgr);
    mServiceTimer.setPropertyPrefix(Constants.CFG_PROPERTY_PREFIX);
    String stPathFileName = mServiceTimer.createServicePathFileName();
    File stFile = new File(stPathFileName);
    if (stFile.exists())
        mServiceTimer.load();

    // Is there an explicit list of phases to execute?

    String propertyName = Constants.CFG_PROPERTY_PREFIX + ".phase_list";
    String phaseProperty = mAppMgr.getString(propertyName);
    if (StringUtils.isNotEmpty(phaseProperty)) {
        if (mAppMgr.isPropertyMultiValue(propertyName))
            mPhases = mAppMgr.getStringArray(propertyName);
        else {
            mPhases = new String[1];
            mPhases[0] = phaseProperty;
        }
    }

    // Load and assign our crawl follow and ignore instances.

    CrawlFollow crawlFollow = new CrawlFollow(mAppMgr);
    crawlFollow.setCfgPropertyPrefix(Constants.CFG_PROPERTY_PREFIX + ".extract");
    try {
        crawlFollow.load();
    } catch (NSException | IOException e) {
        String msgStr = String.format("Crawl Follow: %s", e.getMessage());
        appLogger.error(msgStr);
    }
    mAppMgr.addProperty(Constants.PROPERTY_CRAWL_FOLLOW, crawlFollow);

    CrawlIgnore crawlIgnore = new CrawlIgnore(mAppMgr);
    crawlIgnore.setCfgPropertyPrefix(Constants.CFG_PROPERTY_PREFIX + ".extract");
    try {
        crawlIgnore.load();
    } catch (NSException | IOException e) {
        String msgStr = String.format("Crawl Ignore: %s", e.getMessage());
        appLogger.error(msgStr);
    }
    mAppMgr.addProperty(Constants.PROPERTY_CRAWL_IGNORE, crawlIgnore);

    // Clear out crawl queue from previous service sessions.

    CrawlQueue crawlQueue = new CrawlQueue(mAppMgr);
    crawlQueue.reset();

    appLogger.trace(mAppMgr.LOGMSG_TRACE_DEPART);

    mIsAlive.set(true);
}

From source file:com.nridge.connector.fs.con_fs.task.TaskConnectorFS.java

/**
  * If this task is scheduled to be executed (e.g. its run/test
  * name matches the command line arguments), then this method
  * is guaranteed to be executed prior to the thread being
  * started.
  *
  * @param anAppMgr Application manager instance.
  *
  * @throws com.nridge.core.base.std.NSException Application specific exception.
  */
@Override
public void init(AppMgr anAppMgr) throws NSException {
    mAppMgr = anAppMgr;
    Logger appLogger = mAppMgr.getLogger(this, "init");

    appLogger.trace(mAppMgr.LOGMSG_TRACE_ENTER);

    mIsAlive = new AtomicBoolean(false);

    // Write our configuration properties for troubleshooting purposes.

    mAppMgr.writeCfgProperties(appLogger);

    // Assign our between crawl sleep time.

    mSleepTimeInMinutes = 15;
    String sleepTimeString = mAppMgr.getString(Constants.CFG_PROPERTY_PREFIX + ".run_sleep_between");
    if (StringUtils.endsWithIgnoreCase(sleepTimeString, "m")) {
        String minuteString = StringUtils.stripEnd(sleepTimeString, "m");
        if ((StringUtils.isNotEmpty(minuteString)) && (StringUtils.isNumeric(minuteString)))
            mSleepTimeInMinutes = Integer.parseInt(minuteString);
    } else if ((StringUtils.isNotEmpty(sleepTimeString)) && (StringUtils.isNumeric(sleepTimeString)))
        mSleepTimeInMinutes = Integer.parseInt(sleepTimeString);

    // The extract queue holds documents that have been extracted from the content source.

    int extractQueueSize = mAppMgr.getInt(Constants.CFG_PROPERTY_PREFIX + ".extract.queue_length",
            Connector.QUEUE_LENGTH_DEFAULT);
    BlockingQueue extractQueue = new ArrayBlockingQueue(extractQueueSize);
    mAppMgr.addProperty(Connector.QUEUE_EXTRACT_NAME, extractQueue);

    // The transform queue holds documents that have been transformed after extraction.

    int transformQueueSize = mAppMgr.getInt(Constants.CFG_PROPERTY_PREFIX + ".transform.queue_length",
            Connector.QUEUE_LENGTH_DEFAULT);
    BlockingQueue transformQueue = new ArrayBlockingQueue(transformQueueSize);
    mAppMgr.addProperty(Connector.QUEUE_TRANSFORM_NAME, transformQueue);

    // The publish queue holds documents that have been published to the search index.

    int publishQueueSize = mAppMgr.getInt(Constants.CFG_PROPERTY_PREFIX + ".publish.queue_length",
            Connector.QUEUE_LENGTH_DEFAULT);
    BlockingQueue publishQueue = new ArrayBlockingQueue(publishQueueSize);
    mAppMgr.addProperty(Connector.QUEUE_PUBLISH_NAME, publishQueue);

    // Load our schema definition from the data source folder.

    DataBag schemaBag;
    String schemaPathFileName = String.format("%s%c%s", mAppMgr.getString(mAppMgr.APP_PROPERTY_DS_PATH),
            File.separatorChar, Constants.SCHEMA_FILE_NAME);
    DataBagXML dataBagXML = new DataBagXML();
    try {
        dataBagXML.load(schemaPathFileName);
        schemaBag = dataBagXML.getBag();
    } catch (Exception e) {
        String msgStr = String.format("%s: %s", schemaPathFileName, e.getMessage());
        appLogger.error(msgStr);
        appLogger.warn("Using internal document schema as alternative - data source schema ignored.");
        schemaBag = schemaBag();
    }

    mAppMgr.addProperty(Connector.PROPERTY_SCHEMA_NAME, schemaBag);

    // Create our mail manager instance.

    MailManager mailManager = new MailManager(mAppMgr, Constants.CFG_PROPERTY_PREFIX + ".mail");
    mAppMgr.addProperty(Connector.PROPERTY_MAIL_NAME, mailManager);

    // Create/Load service time tracking file.

    mServiceTimer = new ServiceTimer(mAppMgr);
    mServiceTimer.setPropertyPrefix(Constants.CFG_PROPERTY_PREFIX);
    String stPathFileName = mServiceTimer.createServicePathFileName();
    File stFile = new File(stPathFileName);
    if (stFile.exists())
        mServiceTimer.load();

    // Is there an explicit list of phases to execute?

    String propertyName = Constants.CFG_PROPERTY_PREFIX + ".phase_list";
    String phaseProperty = mAppMgr.getString(propertyName);
    if (StringUtils.isNotEmpty(phaseProperty)) {
        if (mAppMgr.isPropertyMultiValue(propertyName))
            mPhases = mAppMgr.getStringArray(propertyName);
        else {
            mPhases = new String[1];
            mPhases[0] = phaseProperty;
        }
    }

    // Clear out crawl queue from previous service sessions.

    CrawlQueue crawlQueue = new CrawlQueue(mAppMgr);
    crawlQueue.reset();

    // Create Restlet server instance.

    int portNumber = mAppMgr.getInt(Constants.CFG_PROPERTY_PREFIX + ".restlet.port_number",
            Constants.APPLICATION_PORT_NUMBER_DEFAULT);
    RestletApplication restletApplication = new RestletApplication(mAppMgr);

    appLogger.info("Starting Restlet Server.");
    mServer = new Server(Protocol.HTTP, portNumber, restletApplication);
    try {
        mServer.start();
    } catch (Exception e) {
        appLogger.error("Restlet Server (start): " + e.getMessage(), e);
        throw new NSException(e.getMessage());
    }

    appLogger.trace(mAppMgr.LOGMSG_TRACE_DEPART);

    mIsAlive.set(true);
}

From source file:org.apache.nifi.processors.email.ConsumeEWS.java

@Override
public void onTrigger(ProcessContext context, ProcessSession processSession) throws ProcessException {
    if (this.messageQueue == null) {
        int fetchSize = context.getProperty(FETCH_SIZE).evaluateAttributeExpressions().asInteger();
        this.messageQueue = new ArrayBlockingQueue<>(fetchSize);
    }

    this.folderName = context.getProperty(FOLDER).getValue();

    Message emailMessage = this.receiveMessage(context);
    if (emailMessage != null) {
        this.transfer(emailMessage, context, processSession);
    } else {
        //No new messages found, yield the processor
        context.yield();
    }
}

From source file:org.apache.apex.malhar.kafka.KafkaConsumerWrapper.java

/**
 * This method is called in setup method of Abstract Kafka Input Operator
 */
public void create(AbstractKafkaInputOperator ownerOperator) {
    holdingBuffer = new ArrayBlockingQueue<>(ownerOperator.getHoldingBufferSize());
    this.ownerOperator = ownerOperator;
    logger.info("Create consumer wrapper with holding buffer size: {} ", ownerOperator.getHoldingBufferSize());
    if (logger.isInfoEnabled()) {
        logger.info("Assignments are {} ", Joiner.on('\n').join(ownerOperator.assignment()));
    }
}