Example usage for java.util.concurrent ArrayBlockingQueue ArrayBlockingQueue

Introduction

This page shows example usage of the java.util.concurrent ArrayBlockingQueue(int capacity) constructor.

Prototype

public ArrayBlockingQueue(int capacity) 

Document

Creates an ArrayBlockingQueue with the given (fixed) capacity and default access policy.
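
A minimal, self-contained sketch of what the fixed capacity means in practice (the class name and values are illustrative, not taken from the sources below):

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

public class CapacitySketch {
    public static void main(String[] args) throws InterruptedException {
        // fixed capacity of 2, default (non-fair) access policy
        BlockingQueue<String> queue = new ArrayBlockingQueue<String>(2);
        queue.put("a"); // succeeds immediately
        queue.put("b"); // queue is now full
        System.out.println(queue.offer("c")); // false: offer() refuses instead of blocking
        System.out.println(queue.take());     // "a": elements leave in FIFO order
    }
}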

Usage

From source file:org.datagator.tools.importer.impl.XlsxInputStreamExtractor.java
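
In this extractor, the bounded queue buffers (AtomType, Object) pairs that an Apache POI HSSF event listener emits while the spreadsheet stream is processed.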

public XlsxInputStreamExtractor(InputStream stream) throws IOException {
    super(stream);
    _queue = new ArrayBlockingQueue<ImmutablePair<AtomType, Object>>(MAX_QUEUE_CAPACITY);
    final HSSFRequest request = new HSSFRequest();
    request.addListenerForAllRecords(new MissingRecordAwareHSSFListener(new EventTransformer(_queue)));
    final HSSFEventFactory factory = new HSSFEventFactory();
    factory.processEvents(request, stream);
}

From source file:com.alibaba.otter.node.etl.common.db.DbPerfIntergration.java
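
This load test uses the queue as the bounded backlog of a ThreadPoolExecutor; with CallerRunsPolicy, submissions that overflow the queue run on the submitting thread, which throttles the producer.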

@Test
public void test_stack() {
    DbMediaSource dbMediaSource = new DbMediaSource();
    dbMediaSource.setId(1L);
    dbMediaSource.setDriver("com.mysql.jdbc.Driver");
    dbMediaSource.setUsername("otter");
    dbMediaSource.setPassword("otter");
    dbMediaSource.setUrl("jdbc:mysql://127.0.0.1:3306/retl");
    dbMediaSource.setEncode("UTF-8");
    dbMediaSource.setType(DataMediaType.MYSQL);

    DbDataMedia dataMedia = new DbDataMedia();
    dataMedia.setSource(dbMediaSource);
    dataMedia.setId(1L);
    dataMedia.setName("ljhtable1");
    dataMedia.setNamespace("otter");

    final DbDialect dbDialect = dbDialectFactory.getDbDialect(2L, dataMedia.getSource());
    want.object(dbDialect).clazIs(MysqlDialect.class);

    final TransactionTemplate transactionTemplate = dbDialect.getTransactionTemplate();

    // load-test parameters
    int minute = 5;
    int nextId = 1;
    final int thread = 10;
    final int batch = 50;
    final String sql = "insert into otter.ljhtable1 values(? , ? , ? , ?)";

    final CountDownLatch latch = new CountDownLatch(thread);
    ExecutorService executor = new ThreadPoolExecutor(thread, thread, 60, TimeUnit.SECONDS,
            new ArrayBlockingQueue<Runnable>(thread * 2), new NamedThreadFactory("load"),
            new ThreadPoolExecutor.CallerRunsPolicy());

    for (int sec = 0; sec < minute * 60; sec++) {
        long startTime = System.currentTimeMillis();
        for (int i = 0; i < thread; i++) {
            final int start = nextId + i * batch;
            executor.submit(new Runnable() {

                public void run() {
                    try {
                        transactionTemplate.execute(new TransactionCallback() {

                            public Object doInTransaction(TransactionStatus status) {
                                JdbcTemplate jdbcTemplate = dbDialect.getJdbcTemplate();
                                return jdbcTemplate.batchUpdate(sql, new BatchPreparedStatementSetter() {

                                    public void setValues(PreparedStatement ps, int idx) throws SQLException {
                                        int id = start + idx;
                                        StatementCreatorUtils.setParameterValue(ps, 1, Types.INTEGER, null, id);
                                        StatementCreatorUtils.setParameterValue(ps, 2, Types.VARCHAR, null,
                                                RandomStringUtils.randomAlphabetic(1000));
                                        long time = new Date().getTime();
                                        StatementCreatorUtils.setParameterValue(ps, 3, Types.TIMESTAMP,
                                                new Timestamp(time));
                                        StatementCreatorUtils.setParameterValue(ps, 4, Types.TIMESTAMP,
                                                new Timestamp(time));
                                    }

                                    public int getBatchSize() {
                                        return batch;
                                    }
                                });
                            }
                        });
                    } finally {
                        latch.countDown();
                    }
                }
            });

        }

        long endTime = System.currentTimeMillis();
        try {
            latch.await(1000 * 60L - (endTime - startTime), TimeUnit.MILLISECONDS);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }

        if (latch.getCount() != 0) {
            System.out.println("perf is not enough!");
            System.exit(-1);
        }
        endTime = System.currentTimeMillis();
        System.out.println("Time cost : " + (System.currentTimeMillis() - startTime));
        try {
            long remaining = 1000L - (endTime - startTime);
            if (remaining > 0) { // Thread.sleep rejects negative durations
                TimeUnit.MILLISECONDS.sleep(remaining);
            }
        } catch (InterruptedException e) {
            e.printStackTrace();
        }

        nextId = nextId + thread * batch;
    }
    executor.shutdown();
}
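
Distilling the pool construction above into a standalone sketch (the thread count, queue capacity, and class name are illustrative): the ArrayBlockingQueue caps how many tasks may wait, and CallerRunsPolicy converts overflow into backpressure by running the task on the submitting thread.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class BoundedPoolSketch {
    public static void main(String[] args) throws InterruptedException {
        int threads = 4;
        ExecutorService executor = new ThreadPoolExecutor(threads, threads, 60, TimeUnit.SECONDS,
                new ArrayBlockingQueue<Runnable>(threads * 2),  // bounded backlog
                new ThreadPoolExecutor.CallerRunsPolicy());     // overflow runs on the submitter
        for (int i = 0; i < 100; i++) {
            executor.submit(new Runnable() {
                public void run() {
                    // a unit of work; kept empty for the sketch
                }
            });
        }
        executor.shutdown();
        executor.awaitTermination(1, TimeUnit.MINUTES);
    }
}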

From source file:org.grouter.core.util.SchedulerService.java
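
Each node gets one bounded queue shared between a reader job (producer) and a writer job (consumer), both scheduled through Quartz cron triggers.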

/**
 * Start the scheduler using all available nodes.
 *
 * @throws Exception
 */
public void startAllNodes() throws Exception {
    for (Node node : this.nodes) {
        logger.info("Scheduling node : " + node.getDisplayName());
        BlockingQueue<AbstractCommand> blockingQueue = new ArrayBlockingQueue<AbstractCommand>(QUEUE_CAPACITY);
        if (node.getInBound().getEndPointType().getId() == EndPointType.FILE_READER.getId()) {
            JobDetail jobDetail = new JobDetail(node.getInBound().getId().toString(), getTriggerGroup(node),
                    FileReaderJob.class);
            jobDetail.getJobDataMap().put("node", node);
            jobDetail.getJobDataMap().put("queue", blockingQueue);
            CronTrigger cronTrigger = new CronTrigger(getTriggerName(node, true), getTriggerGroup(node),
                    node.getInBound().getCron());
            scheduler.scheduleJob(jobDetail, cronTrigger);
        }
        if (node.getInBound().getEndPointType().getId() == EndPointType.FTP_READER.getId()) {

            JobDetail jobDetail = new JobDetail(node.getInBound().getId().toString(), getTriggerGroup(node),
                    FtpReaderJob.class);
            jobDetail.getJobDataMap().put("node", node);
            jobDetail.getJobDataMap().put("queue", blockingQueue);
            CronTrigger cronTrigger = new CronTrigger(getTriggerName(node, true), getTriggerGroup(node),
                    node.getInBound().getCron());
            scheduler.scheduleJob(jobDetail, cronTrigger);
        }
        if (node.getInBound().getEndPointType().getId() == EndPointType.JMS_READER.getId()) {
            JobDetail jobDetail = new JobDetail(node.getInBound().getId().toString(), getTriggerGroup(node),
                    JmsReaderJob.class);
            jobDetail.getJobDataMap().put("node", node);
            jobDetail.getJobDataMap().put("queue", blockingQueue);
            CronTrigger cronTrigger = new CronTrigger(getTriggerName(node, true), getTriggerGroup(node),
                    node.getInBound().getCron());
            scheduler.scheduleJob(jobDetail, cronTrigger);

        }

        if (node.getInBound().getEndPointType().getId() == EndPointType.HTTP_READER.getId()) {
            JobDetail jobDetail = new JobDetail(node.getInBound().getId().toString(), getTriggerGroup(node),
                    HttpReaderJob.class);
            jobDetail.getJobDataMap().put("node", node);
            jobDetail.getJobDataMap().put("queue", blockingQueue);
            CronTrigger cronTrigger = new CronTrigger(getTriggerName(node, true), getTriggerGroup(node),
                    node.getInBound().getCron());
            scheduler.scheduleJob(jobDetail, cronTrigger);

        }

        // For all WRITERS we only need to create this single consumer job
        if (node.getOutBound().getEndPointType().getId() == EndPointType.FILE_WRITER.getId()
                || node.getOutBound().getEndPointType().getId() == EndPointType.JMS_WRITER.getId()
                || node.getOutBound().getEndPointType().getId() == EndPointType.FTP_WRITER.getId()) {
            JobDetail jobDetail = new JobDetail(node.getOutBound().getId().toString(), getTriggerGroup(node),
                    CommandConsumerJob.class);
            jobDetail.getJobDataMap().put("node", node);
            jobDetail.getJobDataMap().put("queue", blockingQueue);
            CronTrigger cronTrigger = new CronTrigger(getTriggerName(node, false), getTriggerGroup(node),
                    node.getOutBound().getCron());
            scheduler.scheduleJob(jobDetail, cronTrigger);
        }
        node.setNodeStatus(NodeStatus.SCHEDULED_TO_START);
    }

    // Start the Scheduler
    scheduler.start();
}

From source file:com.pinterest.rocksplicator.controller.DispatcherTest.java
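
The test backs a single-threaded executor with a one-slot queue, so at most one task can wait while another runs.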

@Test
public void testSingleTaskLifeCycle() throws Exception {
    // Assuming there is only one task in the queue
    PowerMockito.when(taskQueue.dequeueTask(anyString())).thenReturn(getSleepIncrementTaskFromQueue())
            .thenReturn(null);
    Semaphore idleWorkersSemaphore = new Semaphore(1);
    ThreadPoolExecutor threadPoolExecutor = new ThreadPoolExecutor(1, 1, 0, TimeUnit.SECONDS,
            new ArrayBlockingQueue<Runnable>(1));
    WorkerPool workerPool = new WorkerPool(threadPoolExecutor, idleWorkersSemaphore, taskQueue);
    TaskDispatcher dispatcher = new TaskDispatcher(2, idleWorkersSemaphore, workerPool, taskQueue);
    dispatcher.start();
    // Wait for first task to be done
    synchronized (SleepIncrementTask.notifyObject) {
        SleepIncrementTask.notifyObject.wait();
    }
    verify(taskQueue, atLeastOnce()).dequeueTask(anyString());
    Assert.assertEquals(1, SleepIncrementTask.executionCounter.intValue());
    Assert.assertEquals(1, idleWorkersSemaphore.availablePermits());
    dispatcher.stop();
}

From source file:org.apache.hadoop.raid.PMDecoder.java
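
The decoder allocates one input queue (capacity 2048 / paritySize) and one signal queue (capacity 1024) per decoding thread.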

/**
 * @param forRecovery determines whether this decoder is used for recovery or for degraded read
 *   (someday the two may be combined into the same function)
 */
public PMDecoder(Configuration conf, int stripeSize, int paritySize, boolean forRecovery) {
    super(conf, stripeSize, paritySize);
    LOG.info("initial decoder: k=" + stripeSize + " m=" + paritySize + " bufSize:" + bufSize);

    threadNum = conf.getInt("hdfs.raid.decoder.threadnum", 1);

    // data queues: input to the decoders
    this.q = new BlockingQueue[threadNum];
    for (int i = 0; i < threadNum; i++)
        q[i] = new ArrayBlockingQueue<DecodePackage>(2048 / paritySize);

    // signal queues: decoder-to-output signals
    this.p = new BlockingQueue[threadNum];
    for (int i = 0; i < threadNum; i++)
        p[i] = new ArrayBlockingQueue<Integer>(1024);

    Thread[] ds = new Thread[threadNum];
    for (int i = 0; i < threadNum; i++) {
        if (forRecovery) {
            PMRecoveryDecoder decoder = new PMRecoveryDecoder(i);
            ds[i] = new Thread(decoder);
        } else {
            PMDegradedReadDecoder decoder = new PMDegradedReadDecoder(i);
            ds[i] = new Thread(decoder);
        }
        ds[i].start();
    }

    LOG.info("PMDecoder 1/1");

}

From source file:com.flipkart.aesop.runtime.bootstrap.consumer.DefaultBlockingEventConsumer.java
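
Each partition gets its own single-threaded executor backed by a bounded queue of executorQueueSize entries; a full queue is handled by the configured rejectedExecutionHandler.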

@Override
public void afterPropertiesSet() throws Exception {
    this.numberOfPartition = Math.min(numberOfPartition, Runtime.getRuntime().availableProcessors());
    LOGGER.info("numberOfPartition used: " + numberOfPartition);
    for (int i = 0; i < numberOfPartition; i++) {
        BlockingQueue<Runnable> queue = new ArrayBlockingQueue<Runnable>(executorQueueSize);
        executors.add(new ThreadPoolExecutor(1, 1, 0, TimeUnit.MILLISECONDS, queue, rejectedExecutionHandler));
    }
}

From source file:org.apache.htrace.impl.FlumeSpanReceiver.java
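
The receiver buffers up to 1000 spans in the bounded queue before they are forwarded to Flume.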

public FlumeSpanReceiver(HTraceConfiguration conf) {
    this.queue = new ArrayBlockingQueue<Span>(1000);
    this.tf = new SimpleThreadFactory();
    configure(conf);
}

From source file:org.sourceopen.hadoop.hbase.replication.consumer.FileChannelManager.java
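
The queue bounds the backlog of the file-channel thread pool; both the pool size and the queue capacity are read from configuration.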

public void init() throws Exception {
    if (LOG.isInfoEnabled()) {
        LOG.info("FileChannelManager is pendding to start.");
    }//w w  w.j av a  2s  .c om
    conf.addResource(ConsumerConstants.COMMON_CONFIG_FILE);
    conf.addResource(ConsumerConstants.CONSUMER_CONFIG_FILE);
    adapter = ProtocolAdapter.getAdapter(conf);

    fileChannelPool = new ThreadPoolExecutor(
            conf.getInt(ConsumerConstants.CONFKEY_REP_FILE_CHANNEL_POOL_SIZE, 10),
            conf.getInt(ConsumerConstants.CONFKEY_REP_FILE_CHANNEL_POOL_SIZE, 10),
            conf.getInt(ConsumerConstants.CONFKEY_THREADPOOL_KEEPALIVE_TIME, 100), TimeUnit.SECONDS,
            new ArrayBlockingQueue<Runnable>(conf.getInt(ConsumerConstants.CONFKEY_THREADPOOL_SIZE, 100)));
    fs = FileSystem.get(URI.create(conf.get(HDFSFileAdapter.CONFKEY_HDFS_FS)), conf);
    zoo = HRepConfigUtil.createAdvZooKeeperByHBaseConfig(conf, new NothingZookeeperWatch());
    root = ZNodeFactory.createZNode(zoo,
            conf.get(ConsumerConstants.CONFKEY_ROOT_ZOO, ConsumerConstants.ROOT_ZOO), true);
    if (LOG.isInfoEnabled()) {
        LOG.info("FileChannelManager init.");
    }
}

From source file:com.datatorrent.contrib.kinesis.KinesisConsumer.java
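
The holding buffer bounds how many Pair&lt;String, Record&gt; entries fetched from the Kinesis shards can accumulate before the operator drains them.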

/**
 * This method is called in the setup method of the operator.
 */
public void create() {
    holdingBuffer = new ArrayBlockingQueue<Pair<String, Record>>(bufferSize);
    boolean defaultSelect = (shardIds == null) || (shardIds.size() == 0);
    final List<Shard> pms = KinesisUtil.getInstance().getShardList(streamName);
    for (final Shard shId : pms) {
        // check defaultSelect first: shardIds may be null, and short-circuiting avoids an NPE
        if ((defaultSelect || shardIds.contains(shId.getShardId())) && !closedShards.contains(shId)) {
            simpleConsumerThreads.add(shId);
        }
    }
}

From source file:de.ufinke.cubaja.sort.SortManager.java
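
Three bounded queues of equal capacity connect the sort, file, and main stages of the sorter.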

public SortManager(SortConfig config, Comparator<?> comparator) {

    myId = getId();

    this.config = config;
    this.comparator = comparator;

    if (config.isLog()) {
        logger = LogFactory.getLog(Sorter.class);
        logPrefix = "Sort#" + myId + ": ";
        stopwatch = new Stopwatch();
    } else {
        logger = null;
        logPrefix = null;
        stopwatch = null;
    }
    logInterval = config.getLogInterval() * 1000;

    algorithm = config.getAlgorithm();

    int blockSize = config.getBlockSize();
    if (blockSize == 0) {
        blockSize = DEFAULT_BLOCK_SIZE;
    }
    if (blockSize < MINIMUM_BLOCK_SIZE) {
        blockSize = MINIMUM_BLOCK_SIZE;
    }
    this.blockSize = blockSize;

    int runSize = config.getRunSize();
    if (runSize == 0) {
        runSize = DEFAULT_RUN_SIZE;
    }
    if (runSize < MINIMUM_RUN_SIZE) {
        runSize = MINIMUM_RUN_SIZE;
    }
    this.runSize = runSize;

    int arrayCount = 1;
    int arraySize = runSize;
    while (arraySize > MAX_ARRAY_SIZE) {
        arraySize = arraySize >> 1;
        arrayCount = arrayCount << 1;
    }
    this.arrayCount = arrayCount;
    this.arraySize = arraySize;

    // queue capacity is roughly 9/16 of arrayCount (e.g. arrayCount = 16 gives 8 + 1 + 1 = 10)
    int queueCapacity = (arrayCount >> 1) + (arrayCount >> 4) + 1;
    sortQueue = new ArrayBlockingQueue<Request>(queueCapacity);
    fileQueue = new ArrayBlockingQueue<Request>(queueCapacity);
    mainQueue = new ArrayBlockingQueue<Request>(queueCapacity);

    executor = Executors.newCachedThreadPool(createThreadFactory());

    if (isDebug()) {
        putCount = new AtomicLong();
        getCount = new AtomicLong();
        debug("sortOpen", runSize, blockSize, algorithm.getClass().getName());
        if (isTrace()) {
            initTimer(logger, logPrefix, "sortPut", putCount);
        }
    }
}