Example usage for java.util.concurrent SynchronousQueue SynchronousQueue()

List of usage examples for java.util.concurrent SynchronousQueue SynchronousQueue()

Introduction

On this page you can find usage examples for the no-argument constructor of java.util.concurrent.SynchronousQueue, i.e. new SynchronousQueue().

Prototype

public SynchronousQueue() 

Document

Creates a SynchronousQueue with nonfair access policy.
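
Before the project examples below, here is a minimal standalone sketch (not taken from any of the listed projects) of the hand-off behaviour this constructor gives you: the queue has no capacity, so put() blocks until another thread is ready to take(), and offer() returns false when no consumer is waiting.

import java.util.concurrent.SynchronousQueue;

public class SynchronousQueueHandoffDemo {
    public static void main(String[] args) throws InterruptedException {
        SynchronousQueue<String> queue = new SynchronousQueue<>(); // nonfair access policy

        // offer() fails immediately: no consumer is currently waiting in take()
        System.out.println("offer without consumer: " + queue.offer("ignored"));

        Thread consumer = new Thread(() -> {
            try {
                // take() blocks until a producer hands an element over
                System.out.println("consumer received: " + queue.take());
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        consumer.start();

        // put() blocks until the consumer's take() is ready to receive the element
        queue.put("hello");
        consumer.join();
    }
}

This direct hand-off is also why a SynchronousQueue is the work queue behind Executors.newCachedThreadPool() and the ThreadPoolExecutor configurations in the examples below.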

Usage

From source file:com.cloud.agent.Agent.java

public Agent(final IAgentShell shell, final int localAgentId, final ServerResource resource)
        throws ConfigurationException {
    _shell = shell;
    _resource = resource;
    _link = null;

    resource.setAgentControl(this);

    final String value = _shell.getPersistentProperty(getResourceName(), "id");
    _id = value != null ? Long.parseLong(value) : null;
    s_logger.info("id is " + (_id != null ? _id : ""));

    final Map<String, Object> params = PropertiesUtil.toMap(_shell.getProperties());

    // merge with properties from command line to let resource access command line parameters
    for (final Map.Entry<String, Object> cmdLineProp : _shell.getCmdLineProperties().entrySet()) {
        params.put(cmdLineProp.getKey(), cmdLineProp.getValue());
    }

    if (!_resource.configure(getResourceName(), params)) {
        throw new ConfigurationException("Unable to configure " + _resource.getName());
    }

    final String host = _shell.getHost();
    _connection = new NioClient("Agent", host, _shell.getPort(), _shell.getWorkers(), this);

    // ((NioClient)_connection).setBindAddress(_shell.getPrivateIp());

    s_logger.debug("Adding shutdown hook");
    Runtime.getRuntime().addShutdownHook(new ShutdownThread(this));

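    // SynchronousQueue has no capacity: a task submitted to this pool is handed straight to an
    // idle thread or a newly created one (up to 2 * pingRetries); beyond that it is rejected.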
    _ugentTaskPool = new ThreadPoolExecutor(shell.getPingRetries(), 2 * shell.getPingRetries(), 10,
            TimeUnit.MINUTES, new SynchronousQueue<Runnable>(), new NamedThreadFactory("UgentTask"));

    _executor = new ThreadPoolExecutor(_shell.getWorkers(), 5 * _shell.getWorkers(), 1, TimeUnit.DAYS,
            new LinkedBlockingQueue<Runnable>(), new NamedThreadFactory("agentRequest-Handler"));

    s_logger.info("Agent [id = " + (_id != null ? _id : "new") + " : type = " + getResourceName() + " : zone = "
            + _shell.getZone() + " : pod = " + _shell.getPod() + " : workers = " + _shell.getWorkers()
            + " : host = " + host + " : port = " + _shell.getPort());
}

From source file:org.apache.hadoop.hive.llap.daemon.impl.TaskExecutorService.java

public TaskExecutorService(int numExecutors, int waitQueueSize, String waitQueueComparatorClassName,
        boolean enablePreemption, ClassLoader classLoader, final LlapDaemonExecutorMetrics metrics,
        Clock clock) {
    super(TaskExecutorService.class.getSimpleName());
    LOG.info("TaskExecutorService is being setup with parameters: " + "numExecutors=" + numExecutors
            + ", waitQueueSize=" + waitQueueSize + ", waitQueueComparatorClassName="
            + waitQueueComparatorClassName + ", enablePreemption=" + enablePreemption);

    final Comparator<TaskWrapper> waitQueueComparator = createComparator(waitQueueComparatorClassName);
    this.maxParallelExecutors = numExecutors;
    this.waitQueue = new EvictingPriorityBlockingQueue<>(waitQueueComparator, waitQueueSize);
    this.clock = clock == null ? new MonotonicClock() : clock;
    this.threadPoolExecutor = new ThreadPoolExecutor(numExecutors, // core pool size
            numExecutors, // max pool size
            1, TimeUnit.MINUTES, new SynchronousQueue<Runnable>(), // direct hand-off
            new ExecutorThreadFactory(classLoader));
    this.executorService = MoreExecutors.listeningDecorator(threadPoolExecutor);
    this.preemptionQueue = new PriorityBlockingQueue<>(numExecutors, new PreemptionQueueComparator());
    this.enablePreemption = enablePreemption;
    this.numSlotsAvailable = new AtomicInteger(numExecutors);
    this.metrics = metrics;
    if (metrics != null) {
        metrics.setNumExecutorsAvailable(numSlotsAvailable.get());
    }

    // single threaded scheduler for tasks from wait queue to executor threads
    ExecutorService wes = Executors.newFixedThreadPool(1, new ThreadFactoryBuilder().setDaemon(true)
            .setNameFormat(WAIT_QUEUE_SCHEDULER_THREAD_NAME_FORMAT).build());
    this.waitQueueExecutorService = MoreExecutors.listeningDecorator(wes);

    ExecutorService executionCompletionExecutorServiceRaw = Executors.newFixedThreadPool(1,
            new ThreadFactoryBuilder().setDaemon(true).setNameFormat("ExecutionCompletionThread #%d").build());
    executionCompletionExecutorService = MoreExecutors
            .listeningDecorator(executionCompletionExecutorServiceRaw);
    ListenableFuture<?> future = waitQueueExecutorService.submit(new WaitQueueWorker());
    Futures.addCallback(future, new WaitQueueWorkerCallback());
}

From source file:com.offbynull.portmapper.upnpigd.UpnpIgdDiscovery.java

private static Map<UpnpIgdDevice, byte[]> getRootXmlForEachDevice(Set<UpnpIgdDevice> devices)
        throws InterruptedException {
    Map<UpnpIgdDevice, byte[]> serviceRoots = new HashMap<>();

    ExecutorService executorService = null;
    try {
        int maximumPoolSize = (int) ((double) Runtime.getRuntime().availableProcessors() / (1.0 - 0.95));
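        // 0 core threads + SynchronousQueue: each HTTP request runs on its own short-lived
        // thread (reclaimed after 1 second of idleness), up to the computed maximumPoolSize.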
        executorService = new ThreadPoolExecutor(0, maximumPoolSize, 1, TimeUnit.SECONDS,
                new SynchronousQueue<Runnable>());

        List<HttpRequestCallable<UpnpIgdDevice>> tasks = new LinkedList<>();
        for (UpnpIgdDevice device : devices) {
            tasks.add(new HttpRequestCallable<>(device.getUrl(), device));
        }

        List<Future<Pair<UpnpIgdDevice, byte[]>>> results = executorService.invokeAll(tasks);

        for (Future<Pair<UpnpIgdDevice, byte[]>> result : results) {
            try {
                Pair<UpnpIgdDevice, byte[]> data = result.get();
                serviceRoots.put(data.getKey(), data.getValue());
            } catch (InterruptedException | ExecutionException | CancellationException e) { // NOPMD
                // do nothing, skip
            }
        }
    } finally {
        if (executorService != null) {
            executorService.shutdownNow();
        }
    }

    return serviceRoots;
}

From source file:fr.xebia.springframework.concurrent.ThreadPoolExecutorFactory.java

@Override
protected ThreadPoolExecutor createInstance() throws Exception {
    Assert.isTrue(this.corePoolSize >= 0, "corePoolSize must be greater than or equal to zero");
    Assert.isTrue(this.maximumPoolSize > 0, "maximumPoolSize must be greater than zero");
    Assert.isTrue(this.maximumPoolSize >= this.corePoolSize,
            "maximumPoolSize must be greater than or equal to corePoolSize");
    Assert.isTrue(this.queueCapacity >= 0, "queueCapacity must be greater than or equal to zero");

    CustomizableThreadFactory threadFactory = new CustomizableThreadFactory(this.beanName + "-");
    threadFactory.setDaemon(true);

    BlockingQueue<Runnable> blockingQueue;
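    // A capacity of 0 selects direct hand-off via SynchronousQueue; a positive capacity
    // selects a bounded LinkedBlockingQueue instead.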
    if (queueCapacity == 0) {
        blockingQueue = new SynchronousQueue<Runnable>();
    } else {
        blockingQueue = new LinkedBlockingQueue<Runnable>(queueCapacity);
    }
    ThreadPoolExecutor instance = new SpringJmxEnabledThreadPoolExecutor(corePoolSize, //
            maximumPoolSize, //
            keepAliveTimeInSeconds, //
            TimeUnit.SECONDS, //
            blockingQueue, //
            threadFactory, //
            rejectedExecutionHandlerClass.newInstance(), //
            new ObjectName("java.util.concurrent:type=ThreadPoolExecutor,name=" + beanName));

    return instance;
}

From source file:org.apache.kylin.job.impl.threadpool.DefaultScheduler.java

@Override
public synchronized void init(JobEngineConfig jobEngineConfig) throws SchedulerException {
    if (!initialized) {
        initialized = true;
    } else {
        return;
    }
    String ZKConnectString = getZKConnectString(jobEngineConfig);
    if (StringUtils.isEmpty(ZKConnectString)) {
        throw new IllegalArgumentException("ZOOKEEPER_QUORUM is empty!");
    }

    this.jobEngineConfig = jobEngineConfig;
    RetryPolicy retryPolicy = new ExponentialBackoffRetry(1000, 3);
    this.zkClient = CuratorFrameworkFactory.newClient(ZKConnectString, retryPolicy);
    this.zkClient.start();
    this.sharedLock = new InterProcessMutex(zkClient, schedulerId());
    boolean hasLock = false;
    try {
        hasLock = sharedLock.acquire(3, TimeUnit.SECONDS);
    } catch (Exception e) {
        logger.warn("error acquire lock", e);
    }
    if (!hasLock) {
        logger.warn("fail to acquire lock, scheduler has not been started");
        zkClient.close();
        return;
    }
    executableManager = ExecutableManager.getInstance(jobEngineConfig.getConfig());
    //load all executable, set them to a consistent status
    fetcherPool = Executors.newScheduledThreadPool(1);
    int corePoolSize = jobEngineConfig.getMaxConcurrentJobLimit();
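    // Fixed-size pool backed by a SynchronousQueue: a job is handed to an idle worker
    // immediately or rejected (default handler) when all workers are busy, instead of queuing.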
    jobPool = new ThreadPoolExecutor(corePoolSize, corePoolSize, Long.MAX_VALUE, TimeUnit.DAYS,
            new SynchronousQueue<Runnable>());
    context = new DefaultContext(Maps.<String, Executable>newConcurrentMap(), jobEngineConfig.getConfig());

    for (AbstractExecutable executable : executableManager.getAllExecutables()) {
        if (executable.getStatus() == ExecutableState.READY) {
            executableManager.updateJobOutput(executable.getId(), ExecutableState.ERROR, null,
                    "scheduler initializing work to reset job to ERROR status");
        }
    }
    executableManager.updateAllRunningJobsToError();

    Runtime.getRuntime().addShutdownHook(new Thread() {
        public void run() {
            logger.debug("Closing zk connection");
            try {
                shutdown();
            } catch (SchedulerException e) {
                logger.error("error shutdown scheduler", e);
            }
        }
    });

    fetcherPool.scheduleAtFixedRate(new FetcherRunner(), 10,
            ExecutableConstants.DEFAULT_SCHEDULER_INTERVAL_SECONDS, TimeUnit.SECONDS);
    hasStarted = true;
}

From source file:com.mirth.connect.connectors.tcp.TcpReceiver.java

@Override
public void onStart() throws ConnectorTaskException {
    disposing.set(false);
    results.clear();
    clientReaders.clear();

    if (connectorProperties.isServerMode()) {
        // If we're in server mode, use the max connections property to initialize the thread pool
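        // 0 core threads + SynchronousQueue gives one worker thread per accepted client,
        // up to maxConnections; idle workers are reclaimed after 60 seconds.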
        executor = new ThreadPoolExecutor(0, maxConnections, 60L, TimeUnit.SECONDS,
                new SynchronousQueue<Runnable>());
    } else {
        // If we're in client mode, only a single thread is needed
        executor = Executors.newSingleThreadExecutor();
    }

    if (connectorProperties.isServerMode()) {
        try {
            createServerSocket();
        } catch (IOException e) {
            throw new ConnectorTaskException("Failed to create server socket (" + connectorProperties.getName()
                    + " \"Source\" on channel " + getChannelId() + ").", e);
        }
    }

    // Create the acceptor thread
    thread = new Thread(
            "TCP Receiver Server Acceptor Thread on " + getChannel().getName() + " (" + getChannelId() + ")") {
        @Override
        public void run() {
            while (getCurrentState() == DeployedState.STARTED) {
                Socket socket = null;

                if (connectorProperties.isServerMode()) {
                    // Server mode; wait to accept a client socket on the ServerSocket
                    try {
                        logger.debug("Waiting for new client socket (" + connectorProperties.getName()
                                + " \"Source\" on channel " + getChannelId() + ").");
                        socket = serverSocket.accept();
                        logger.trace("Accepted new socket: " + socket.getRemoteSocketAddress().toString()
                                + " -> " + socket.getLocalSocketAddress());
                    } catch (java.io.InterruptedIOException e) {
                        logger.debug("Interruption during server socket accept operation ("
                                + connectorProperties.getName() + " \"Source\" on channel " + getChannelId()
                                + ").", e);
                    } catch (Exception e) {
                        logger.debug("Error accepting new socket (" + connectorProperties.getName()
                                + " \"Source\" on channel " + getChannelId() + ").", e);
                    }
                } else {
                    // Client mode, manually initiate a client socket
                    try {
                        logger.debug("Initiating for new client socket (" + connectorProperties.getName()
                                + " \"Source\" on channel " + getChannelId() + ").");
                        if (connectorProperties.isOverrideLocalBinding()) {
                            socket = SocketUtil.createSocket(configuration, getLocalAddress(), getLocalPort());
                        } else {
                            socket = SocketUtil.createSocket(configuration);
                        }
                        clientSocket = socket;
                        SocketUtil.connectSocket(socket, getRemoteAddress(), getRemotePort(), timeout);
                    } catch (Exception e) {
                        logger.error("Error initiating new socket (" + connectorProperties.getName()
                                + " \"Source\" on channel " + getChannelId() + ").", e);
                        closeSocketQuietly(socket);
                        socket = null;
                        clientSocket = null;
                    }
                }

                try {
                    ThreadUtils.checkInterruptedStatus();

                    if (socket != null) {
                        synchronized (clientReaders) {
                            TcpReader reader = null;

                            try {
                                // Only allow worker threads to be submitted if we're not currently trying to stop the connector
                                if (disposing.get()) {
                                    return;
                                }
                                reader = new TcpReader(socket);
                                clientReaders.add(reader);
                                results.add(executor.submit(reader));
                            } catch (RejectedExecutionException | SocketException e) {
                                if (e instanceof RejectedExecutionException) {
                                    logger.debug("Executor rejected new task (" + connectorProperties.getName()
                                            + " \"Source\" on channel " + getChannelId() + ").", e);
                                } else {
                                    logger.debug("Error initializing socket (" + connectorProperties.getName()
                                            + " \"Source\" on channel " + getChannelId() + ").", e);
                                }
                                clientReaders.remove(reader);
                                closeSocketQuietly(socket);
                            }
                        }
                    }

                    if (connectorProperties.isServerMode()) {
                        // Remove any completed tasks from the list, but don't try to retrieve currently running tasks
                        cleanup(false, false, true);
                    } else {
                        // Wait until the TcpReader is done
                        cleanup(true, false, true);

                        String info = "Client socket finished, waiting "
                                + connectorProperties.getReconnectInterval() + " ms...";
                        eventController.dispatchEvent(new ConnectionStatusEvent(getChannelId(), getMetaDataId(),
                                getSourceName(), ConnectionStatusEventType.INFO, info));

                        // Use the reconnect interval to determine how long to wait until creating another socket
                        sleep(reconnectInterval);
                    }
                } catch (InterruptedException e) {
                    return;
                }
            }
        }
    };
    thread.start();
}

From source file:org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor.java

/**
 * Create the BlockingQueue to use for the ThreadPoolExecutor.
 * <p>A LinkedBlockingQueue instance will be created for a positive
 * capacity value; otherwise, a SynchronousQueue instance will be created.
 * @param queueCapacity the specified queue capacity
 * @return the BlockingQueue instance
 * @see java.util.concurrent.LinkedBlockingQueue
 * @see java.util.concurrent.SynchronousQueue
 */
protected BlockingQueue createQueue(int queueCapacity) {
    if (queueCapacity > 0) {
        return new LinkedBlockingQueue(queueCapacity);
    } else {
        return new SynchronousQueue();
    }
}

From source file:org.apache.hadoop.hbase.client.crosssite.CrossSiteHTable.java

public static ThreadPoolExecutor getDefaultExecutor(Configuration conf) {
    int maxThreads = conf.getInt("hbase.crosssite.table.threads.max", Integer.MAX_VALUE);
    if (maxThreads <= 0) {
        maxThreads = Integer.MAX_VALUE;
    }
    final SynchronousQueue<Runnable> blockingQueue = new SynchronousQueue<Runnable>();
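    // When the pool is saturated, the reject handler below blocks on put() until a worker
    // finishes its current task and takes the runnable directly from this SynchronousQueue.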
    RejectedExecutionHandler rejectHandler = new RejectedExecutionHandler() {

        @Override
        public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
            try {
                blockingQueue.put(r);
            } catch (InterruptedException e) {
                throw new RejectedExecutionException(e);
            }
        }
    };
    long keepAliveTime = conf.getLong("hbase.table.threads.keepalivetime", 60);
    ThreadPoolExecutor pool = new ThreadPoolExecutor(1, maxThreads, keepAliveTime, TimeUnit.SECONDS,
            blockingQueue, Threads.newDaemonThreadFactory("crosssite-hbase-table"), rejectHandler);
    ((ThreadPoolExecutor) pool).allowCoreThreadTimeOut(true);
    return pool;
}

From source file:org.apache.hadoop.hbase.client.crosssite.CrossSiteHBaseAdmin.java

public CrossSiteHBaseAdmin(Configuration conf) throws IOException, KeeperException {
    //    super();
    // create the connection to the global zk of the CrossSiteHBaseAdmin
    Configuration crossSiteZKConf = new Configuration(conf);
    ZKUtil.applyClusterKeyToConf(crossSiteZKConf, conf.get(CrossSiteConstants.CROSS_SITE_ZOOKEEPER));
    this.conf = crossSiteZKConf;
    zkw = new ZooKeeperWatcher(this.conf, "connection to global zookeeper", this, false);
    znodes = new CrossSiteZNodes(zkw);
    this.numRetries = this.conf.getInt("hbase.crosssite.client.retries.number", 5);
    this.retryLongerMultiplier = this.conf.getInt("hbase.crosssite.client.retries.longer.multiplier", 2);
    this.pause = this.conf.getLong("hbase.crosssite.client.pause", 1000);

    int poolSize = this.conf.getInt("hbase.crosssite.admin.pool.size", Integer.MAX_VALUE);
    if (poolSize <= 0) {
        poolSize = Integer.MAX_VALUE;
    }
    final SynchronousQueue<Runnable> blockingQueue = new SynchronousQueue<Runnable>();
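    // As in CrossSiteHTable above, rejected tasks are put() back on the SynchronousQueue,
    // blocking the caller until a pool thread becomes free to take them.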
    RejectedExecutionHandler rejectHandler = new RejectedExecutionHandler() {
        @Override
        public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
            try {
                blockingQueue.put(r);
            } catch (InterruptedException e) {
                throw new RejectedExecutionException(e);
            }
        }
    };
    pool = new ThreadPoolExecutor(1, poolSize, 60, TimeUnit.SECONDS, blockingQueue,
            Threads.newDaemonThreadFactory("crosssite-hbase-admin-"), rejectHandler);
    ((ThreadPoolExecutor) pool).allowCoreThreadTimeOut(true);
}

From source file:org.lizardirc.beancounter.Beancounter.java

private ExecutorService constructExecutorService() {
    BasicThreadFactory factory = new BasicThreadFactory.Builder().namingPattern("primaryListenerPool-thread%d")
            .daemon(true).build();
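    // 0 core threads, an unbounded maximum, a 60-second keep-alive and a SynchronousQueue:
    // effectively a cached thread pool (cf. Executors.newCachedThreadPool) with daemon threads.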
    ThreadPoolExecutor ret = new ThreadPoolExecutor(0, Integer.MAX_VALUE, 60L, TimeUnit.SECONDS,
            new SynchronousQueue<>(), factory);
    ret.allowCoreThreadTimeOut(true);
    return ret;
}