Example usage for java.util.concurrent ArrayBlockingQueue ArrayBlockingQueue

List of usage examples for java.util.concurrent ArrayBlockingQueue ArrayBlockingQueue

Introduction

This page collects example usages of the java.util.concurrent ArrayBlockingQueue(int) constructor, drawn from real open-source projects.

Prototype

public ArrayBlockingQueue(int capacity) 

Source Link

Document

Creates an ArrayBlockingQueue with the given (fixed) capacity and default access policy.

Usage

From source file:org.apache.phoenix.trace.TraceSpanReceiver.java

public TraceSpanReceiver() {
    // Fixed-capacity queue for incoming spans; CAPACITY is a constant declared
    // elsewhere in this class. A bounded queue keeps memory usage predictable.
    this.spanQueue = new ArrayBlockingQueue<Span>(CAPACITY);
}

From source file:com.roncoo.pay.app.settlement.utils.SettThreadPoolExecutor.java

/**
 * Initializes the notify thread-pool executor. Unset (< 1) configuration values
 * fall back to defaults: work-queue size 1000, keep-alive 1000 seconds, and a
 * core pool size derived from the number of available processors.
 */
public void init() {
    if (workQueueSize < 1) {
        workQueueSize = 1000; // default bounded-queue capacity
    }
    if (this.keepAliveTime < 1) {
        this.keepAliveTime = 1000; // interpreted as seconds (TimeUnit.SECONDS below)
    }
    int coreSize = 0;
    if (this.corePoolSize < 1) {
        coreSize = Runtime.getRuntime().availableProcessors();
        // Scale the max pool size by CPU count weighted by notifyRadio (a ratio out of 10).
        maxPoolSize = Math.round(((float) (coreSize * notifyRadio)) / 10);
        corePoolSize = coreSize / 4;
        if (corePoolSize < 1) {
            corePoolSize = 1;
        }
    }

    // ThreadPoolExecutor rejects maxPoolSize < corePoolSize, so clamp it up.
    if (maxPoolSize < corePoolSize) {
        maxPoolSize = corePoolSize;
    }

    /*
     * Bounded work queue. When it fills up, the CallerRunsPolicy below makes the
     * submitting thread execute the task itself, providing natural back-pressure.
     */
    BlockingQueue<Runnable> notifyWorkQueue = new ArrayBlockingQueue<Runnable>(workQueueSize);

    executor = new ThreadPoolExecutor(corePoolSize, maxPoolSize, keepAliveTime, TimeUnit.SECONDS,
            notifyWorkQueue, new ThreadPoolExecutor.CallerRunsPolicy());

    // NOTE(review): coreSize stays 0 when corePoolSize was pre-configured, so the
    // logged "CPU" value is only meaningful on the auto-sizing path above.
    LOG.info("NotifyExecutor Info : CPU = " + coreSize + " | corePoolSize = " + corePoolSize
            + " | maxPoolSize = " + maxPoolSize + " | workQueueSize = " + workQueueSize);
}

From source file:org.apache.http.impl.client.cache.AsynchronousValidator.java

/**
 * Create AsynchronousValidator which will make revalidation requests
 * using the supplied {@link CachingHttpClient}, and
 * a {@link ThreadPoolExecutor} generated according to the thread
 * pool settings provided in the given {@link CacheConfig}.
 * @param cachingClient used to execute asynchronous requests
 * @param config specifies thread pool settings. See
 * {@link CacheConfig#getAsynchronousWorkersMax()},
 * {@link CacheConfig#getAsynchronousWorkersCore()},
 * {@link CacheConfig#getAsynchronousWorkerIdleLifetimeSecs()},
 * and {@link CacheConfig#getRevalidationQueueSize()}.
 */
public AsynchronousValidator(CachingHttpClient cachingClient, CacheConfig config) {
    // The bounded queue caps how many revalidation tasks may be pending at once.
    this(cachingClient,
            new ThreadPoolExecutor(config.getAsynchronousWorkersCore(), config.getAsynchronousWorkersMax(),
                    config.getAsynchronousWorkerIdleLifetimeSecs(), TimeUnit.SECONDS,
                    new ArrayBlockingQueue<Runnable>(config.getRevalidationQueueSize())));
}

From source file:org.apache.sysml.runtime.controlprogram.paramserv.ParamServer.java

/**
 * Builds the parameter server: one single-slot blocking queue per worker for
 * model broadcast, an aggregation service, and a single-thread executor for it.
 * Broadcasts the initial model before returning.
 */
ParamServer(ListObject model, String aggFunc, Statement.PSUpdateType updateType, ExecutionContext ec,
        int workerNum) {
    _gradientsQueue = new LinkedBlockingDeque<>();
    _modelMap = new HashMap<>(workerNum);
    IntStream.range(0, workerNum).forEach(i -> {
        // Create a single element blocking queue for workers to receive the broadcasted model
        _modelMap.put(i, new ArrayBlockingQueue<>(1));
    });
    _model = model;
    _aggService = new AggregationService(aggFunc, updateType, ec, workerNum);
    try {
        _aggService.broadcastModel();
    } catch (InterruptedException e) {
        // Restore the interrupt flag so code further up the stack can still observe
        // the interruption after we translate it into a runtime exception.
        Thread.currentThread().interrupt();
        throw new DMLRuntimeException("Param server: failed to broadcast the initial model.", e);
    }
    BasicThreadFactory factory = new BasicThreadFactory.Builder().namingPattern("agg-service-pool-thread-%d")
            .build();
    _es = Executors.newSingleThreadExecutor(factory);
}

From source file:com.btoddb.chronicle.catchers.RestCatcherImpl.java

/**
 * Boots an embedded Jetty server bound to {@code bind}:{@code port}, serving the
 * "/v1/events" context with JMX registration, request statistics, and an error
 * handler that logs every handled error response. A startup failure is logged
 * but not rethrown.
 */
private void startJettyServer() {
    // Bounded pool: max 200 threads, min 8, 30s idle timeout, at most 1000 queued jobs.
    QueuedThreadPool tp = new QueuedThreadPool(200, 8, 30000, new ArrayBlockingQueue<Runnable>(1000));
    tp.setName("Chronicle-RestV1-ThreadPool");

    server = new Server(tp);

    // Setup JMX - do this before setting *anything* else on the server object
    MBeanContainer mbContainer = new MBeanContainer(ManagementFactory.getPlatformMBeanServer());
    server.addEventListener(mbContainer);
    server.addBean(mbContainer);
    server.addBean(Log.getLogger(RestCatcherImpl.class));

    // bind connector to IP and port
    ServerConnector connector = new ServerConnector(server);
    connector.setHost(bind);
    connector.setPort(port);
    server.setConnectors(new Connector[] { connector });

    // setup handlers/stats for the context "/v1/events"
    HandlerCollection v1EventHandlers = new HandlerCollection();
    v1EventHandlers.addHandler(new RequestHandler());
    v1EventHandlers.addHandler(new DefaultHandler());
    StatisticsHandler statsHandler = new StatisticsHandler();
    statsHandler.setHandler(v1EventHandlers);

    ContextHandler context = new ContextHandler();
    context.setDisplayName("Chronicle-RestV1");
    context.setContextPath("/v1/events");
    context.setAllowNullPathInfo(true); // to avoid redirect on POST
    context.setHandler(statsHandler);

    // setup handlers/contexts for the overall server
    HandlerCollection serverHandlers = new HandlerCollection();
    serverHandlers.addHandler(context);
    server.setHandler(serverHandlers);
    // Error handler that logs status, client address, X-Forwarded-For, and URL
    // before delegating to the default error rendering.
    server.addBean(new ErrorHandler() {
        @Override
        public void handle(String target, Request baseRequest, HttpServletRequest request,
                HttpServletResponse response) throws IOException {
            logger.warn(String.format("RESTv1 response code = %d : %s : forward-for=%s : %s",
                    baseRequest.getResponse().getStatus(), baseRequest.getRemoteAddr(),
                    baseRequest.getHeader("X-Forwarded-For"), baseRequest.getRequestURL()));
            super.handle(target, baseRequest, request, response);
        }
    });

    try {
        server.start();
        logger.info("jetty started and listening on port " + port);
    } catch (Exception e) {
        // Best-effort startup: failure is logged, not propagated to the caller.
        logger.error("exception while starting jetty server", e);
    }
}

From source file:com.alibaba.napoli.metamorphosis.client.consumer.RecoverStorageManager.java

/**
 * Creates the recover-storage manager: a fixed-size thread pool for recovery
 * tasks, the data directory, and the persisted stores.
 */
public RecoverStorageManager(final MetaClientConfig metaClientConfig,
        final SubscribeInfoManager subscribeInfoManager) {
    super();

    // Fixed-size pool (core == max) with a bounded 100-slot queue; when the queue
    // is full, CallerRunsPolicy makes the submitting thread run the task itself.
    this.threadPoolExecutor = new ThreadPoolExecutor(metaClientConfig.getRecoverThreadCount(),
            metaClientConfig.getRecoverThreadCount(), 60, TimeUnit.SECONDS,
            new ArrayBlockingQueue<Runnable>(100), new NamedThreadFactory("Recover-thread"),
            new ThreadPoolExecutor.CallerRunsPolicy());
    // this.startRecover(metaClientConfig);
    this.makeDataDir();
    this.subscribeInfoManager = subscribeInfoManager;
    this.loadStores();
}

From source file:com.taobao.adfs.database.tdhsocket.client.statement.BatchStatementImpl.java

/**
 * Serializes all batched requests into one BATCH packet, registers a one-slot
 * response queue per request (plus one for the batch header) before writing, and
 * waits for the responses. The finally block always unregisters every queue.
 *
 * @return one TDHSResponse per batched request
 * @throws TDHSException if serializing a packet fails
 */
public TDHSResponse[] commit() throws TDHSException {
    ByteArrayOutputStream retData = new ByteArrayOutputStream(2 * 1024);
    long headerId = id.getAndIncrement();
    try {
        try {
            for (internal_struct is : batchRequest) {
                retData.write(is.getPacket().toByteArray());
                // Register the per-request queue (keyed by sequence id) before the
                // packet is sent, so the reader side always has a delivery target.
                responses.put(is.getPacket().getSeqId(), new ArrayBlockingQueue<BasePacket>(1));
            }
        } catch (IOException e) {
            throw new TDHSException(e);
        }
        BasePacket headerPacket = new BasePacket(TDHSCommon.RequestType.BATCH, headerId, batchRequest.size(),
                retData.toByteArray());
        ArrayBlockingQueue<BasePacket> queue = new ArrayBlockingQueue<BasePacket>(1);
        responses.put(headerId, queue);
        tdhsNet.write(headerPacket);
        return do_real_response(queue);
    } finally {
        // Unregister the header and all per-request queues, even on failure,
        // so the responses map does not accumulate stale entries.
        responses.remove(headerId);
        for (internal_struct is : batchRequest) {
            responses.remove(is.getPacket().getSeqId());
        }

    }
}

From source file:org.apache.ranger.audit.queue.AuditBatchQueue.java

/**
 * Starts the audit batch queue: allocates the bounded event queue, then starts
 * the consumer, the optional file spooler, and finally this queue's daemon
 * listener thread. Synchronized and idempotent: a repeat call logs an error
 * and returns without side effects.
 */
@Override
synchronized public void start() {
    if (consumerThread != null) {
        logger.error("Provider is already started. name=" + getName());
        return;
    }
    logger.info("Creating ArrayBlockingQueue with maxSize=" + getMaxQueueSize());
    queue = new ArrayBlockingQueue<AuditEventBase>(getMaxQueueSize());

    // Start the consumer first
    consumer.start();

    // Then the FileSpooler
    if (fileSpoolerEnabled) {
        fileSpooler.start();
    }

    // Finally the queue listener (daemon so it does not block JVM shutdown)
    consumerThread = new Thread(this, this.getClass().getName() + (threadCount++));
    consumerThread.setDaemon(true);
    consumerThread.start();

}

From source file:com.kurento.kmf.media.HttpGetEndpointTest.java

/**
 * Test for {@link MediaSessionStartedEvent}
 * /*from w  w  w. j  av  a 2 s.  c  o m*/
 * @throws InterruptedException
 */
@Test
public void testEventMediaSessionStarted() throws InterruptedException {
    final PlayerEndpoint player = pipeline.newPlayerEndpoint(URL_SMALL).build();
    HttpGetEndpoint httpEP = pipeline.newHttpGetEndpoint().build();
    player.connect(httpEP);

    final BlockingQueue<EndOfStreamEvent> eosEvents = new ArrayBlockingQueue<EndOfStreamEvent>(1);
    player.addEndOfStreamListener(new MediaEventListener<EndOfStreamEvent>() {

        @Override
        public void onEvent(EndOfStreamEvent event) {
            eosEvents.add(event);
        }
    });

    httpEP.addMediaSessionStartedListener(new MediaEventListener<MediaSessionStartedEvent>() {

        @Override
        public void onEvent(MediaSessionStartedEvent event) {
            player.play();
        }
    });

    DefaultHttpClient httpclient = new DefaultHttpClient();
    try {
        // This should trigger MediaSessionStartedEvent
        httpclient.execute(new HttpGet(httpEP.getUrl()));
    } catch (ClientProtocolException e) {
        throw new KurentoMediaFrameworkException();
    } catch (IOException e) {
        throw new KurentoMediaFrameworkException();
    }

    Assert.assertNotNull(eosEvents.poll(7, SECONDS));

    httpEP.release();
    player.release();
}

From source file:org.apache.hadoop.raid.JRSDecoder.java

/**
 * Builds a JRS decoder with a configurable number of worker threads. Each worker
 * i gets a bounded data queue q[i] (ByteBuffer input, 1024 slots) and a signal
 * queue p[i] (Integer output, 65 slots); the worker threads are created and
 * started here in the constructor.
 *
 * @param forRecovery determine the type of this decoder, for recovery or for degraded read
 *   (someday, i may combine them into the same function)
 */
public JRSDecoder(Configuration conf, int stripeSize, int paritySize, boolean forRecovery) {
    super(conf, stripeSize, paritySize);

    LOG.info("initial decoder: k=" + stripeSize + " m=" + paritySize + " bufSize:" + bufSize);
    threadNum = conf.getInt("hdfs.raid.decoder.threadnum", 1);

    //data queue, input to decode
    // NOTE(review): raw BlockingQueue[] — generic array creation is not possible,
    // so each element is created with its concrete type below.
    this.q = new BlockingQueue[threadNum];
    for (int i = 0; i < threadNum; i++)
        q[i] = new ArrayBlockingQueue<ByteBuffer>(1024);

    //signal queue, decode to output
    this.p = new BlockingQueue[threadNum];
    for (int i = 0; i < threadNum; i++)
        p[i] = new ArrayBlockingQueue<Integer>(65);

    //decode threads
    // NOTE(review): threads are started from the constructor, so the worker
    // runnables must not rely on a fully-constructed JRSDecoder instance.
    Thread[] ds = new Thread[threadNum];
    for (int i = 0; i < threadNum; i++) {
        if (forRecovery) {
            JRSRecoveryDecoder decoder = new JRSRecoveryDecoder(i);
            ds[i] = new Thread(decoder);
        } else {
            JRSDegradedReadDecoder decoder = new JRSDegradedReadDecoder(i);
            ds[i] = new Thread(decoder);
        }
        ds[i].start();
    }

    LOG.info("JRSDecoder 1/1");
}