Example usage for java.util.concurrent Executors newScheduledThreadPool

List of usage examples for java.util.concurrent Executors newScheduledThreadPool

Introduction

On this page you can find example usages of java.util.concurrent Executors.newScheduledThreadPool.

Prototype

public static ScheduledExecutorService newScheduledThreadPool(int corePoolSize, ThreadFactory threadFactory) 

Document

Creates a thread pool that can schedule commands to run after a given delay, or to execute periodically.
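
As a quick reference, here is a minimal, self-contained sketch of the two-argument overload shown above (the class name, thread name, and timing values are illustrative, not taken from any of the projects below). It creates a single-threaded scheduled pool whose worker thread is a named daemon, schedules a periodic task, and then shuts the pool down.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;

public class NewScheduledThreadPoolExample {

    public static void main(String[] args) throws InterruptedException {
        // Custom ThreadFactory: give the pool's thread a readable name and mark it as a daemon
        // so the pool does not keep the JVM alive on its own.
        ThreadFactory threadFactory = new ThreadFactory() {
            @Override
            public Thread newThread(Runnable r) {
                Thread t = new Thread(r, "example-scheduler");
                t.setDaemon(true);
                return t;
            }
        };

        ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1, threadFactory);

        // Run the task immediately and then every 5 seconds.
        scheduler.scheduleAtFixedRate(
                () -> System.out.println("tick on " + Thread.currentThread().getName()),
                0, 5, TimeUnit.SECONDS);

        // Let a few executions happen, then shut the pool down cleanly.
        Thread.sleep(12_000L);
        scheduler.shutdown();
        scheduler.awaitTermination(5, TimeUnit.SECONDS);
    }
}

The real-world examples below follow the same pattern: they pass a ThreadFactory mainly to give the pool's threads recognizable names and, in several cases, to mark them as daemon threads.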

Usage

From source file: com.netflix.curator.framework.imps.CuratorTempFrameworkImpl.java

private synchronized void openConnectionIfNeeded() throws Exception {
    if (client == null) {
        client = (CuratorFrameworkImpl) factory.build(); // cast is safe - we control both sides of this
        client.start();
    }

    if (cleanup == null) {
        ThreadFactory threadFactory = factory.getThreadFactory();
        cleanup = (threadFactory != null) ? Executors.newScheduledThreadPool(1, threadFactory)
                : Executors.newScheduledThreadPool(1);

        Runnable command = new Runnable() {
            @Override
            public void run() {
                checkInactive();
            }
        };
        cleanup.scheduleAtFixedRate(command, inactiveThresholdMs, inactiveThresholdMs, TimeUnit.MILLISECONDS);
    }

    updateLastAccess();
}

From source file: com.openteach.diamond.network.waverider.session.DefaultSessionManager.java

@Override
public boolean init() {
    increment(config.getPreInitSessionCount());
    logger.info(new StringBuilder("Pre init ").append(config.getPreInitSessionCount()).append(" sessions.")
            .toString());
    sessionRecycleScheduler = Executors.newScheduledThreadPool(1,
            new WaveriderThreadFactory(SESSION_RECYCLE_THREAD_NAME_PREFIX, null, false));
    return true;
}

From source file: com.clustercontrol.selfcheck.SelfCheckTaskSubmitter.java

public SelfCheckTaskSubmitter() {
    _scheduler = Executors.newScheduledThreadPool(1, new ThreadFactory() {

        @Override
        public Thread newThread(Runnable r) {
            return new Thread(r, "SelfCheckScheduler");
        }
    });

    _executorService = Executors.newFixedThreadPool(
            HinemosPropertyUtil.getHinemosPropertyNum("selfcheck.threadpool.size", Long.valueOf(4)).intValue(),
            new ThreadFactory() {
                private volatile int _count = 0;

                @Override
                public Thread newThread(Runnable r) {
                    return new Thread(r, "SelfCheckWorker-" + _count++);
                }
            });
}

From source file: org.sourcepit.docker.watcher.DockerWatcher.java

public synchronized void start() {
    isTrue(client == null);
    isTrue(eventObserverThread == null);
    isTrue(scheduler == null);

    final State state = new State() {
        @Override
        protected void handle(List<JsonObject> events) {
            DockerWatcher.this.handle(events);
        }
    };

    final BlockingQueue<JsonElement> queue = new LinkedBlockingQueue<>();

    client = clientFactory.createHttpClient();

    final FetchConatinersCommand fetchContainersCommand = new FetchConatinersCommand(client, uri) {
        @Override
        protected void handle(JsonArray status) {
            LOG.debug("Fetched: {}", status.toString());
            queue.add(status);
        }
    };

    final DockerEventObserver eventObserver = new DockerEventObserver(client, uri) {
        @Override
        protected void handle(JsonObject event) {
            queue.add(event);
        }
    };

    eventObserverThread = new Thread(eventObserver, "Docker Event Observer") {
        @Override
        public void interrupt() {
            eventObserver.die();
            super.interrupt();
        }
    };

    scheduler = Executors.newScheduledThreadPool(1, new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            return new Thread(r, "Docker State Fetcher");
        }
    });

    final SyncStateCommand syncStateCommand = new SyncStateCommand(queue) {

        @Override
        protected void requestNewStatus() {
            LOG.debug("Requesting new status.");
            scheduler.execute(fetchContainersCommand);
        }

        @Override
        protected void applyLastKnownState(JsonArray status) {
            LOG.debug("Applying new status: {}", status.toString());
            state.applyLastKnownState(status);
        }

    };

    scheduler.scheduleWithFixedDelay(fetchContainersCommand, 0, 30, TimeUnit.SECONDS);
    scheduler.scheduleWithFixedDelay(syncStateCommand, 0, 1, TimeUnit.SECONDS);
    eventObserverThread.start();
}

From source file: org.apache.nifi.remote.client.http.HttpClient.java

public HttpClient(final SiteToSiteClientConfig config) {
    super(config);

    peerSelector = new PeerSelector(this, config.getPeerPersistenceFile());
    peerSelector.setEventReporter(config.getEventReporter());

    taskExecutor = Executors.newScheduledThreadPool(1, new ThreadFactory() {
        private final ThreadFactory defaultFactory = Executors.defaultThreadFactory();

        @Override
        public Thread newThread(final Runnable r) {
            final Thread thread = defaultFactory.newThread(r);
            thread.setName("Http Site-to-Site PeerSelector");
            thread.setDaemon(true);
            return thread;
        }
    });

    taskExecutor.scheduleWithFixedDelay(new Runnable() {
        @Override
        public void run() {
            peerSelector.refreshPeers();
        }
    }, 0, 5, TimeUnit.SECONDS);

}

From source file: se.svt.helios.serviceregistration.consul.ConsulServiceRegistrar.java

public ConsulServiceRegistrar(final ConsulClient consulClient, final String serviceCheckScript,
        final String serviceCheckInterval) {
    this.consulClient = consulClient;
    this.serviceCheckScript = serviceCheckScript;
    this.serviceCheckInterval = serviceCheckInterval;

    this.handles = Maps.newConcurrentMap();
    this.endpoints = Sets.newConcurrentHashSet();

    this.executor = MoreExecutors.getExitingScheduledExecutorService(
            (ScheduledThreadPoolExecutor) Executors.newScheduledThreadPool(1,
                    new ThreadFactoryBuilder().setNameFormat("consul-registrar-%d").build()),
            0, TimeUnit.SECONDS);

    // If the Consul agent is restarted, all services will be forgotten. Therefore we sync the
    // state between services known by this plugin and services registered in Consul.
    Runnable registrationRunnable = new Runnable() {
        @Override
        public void run() {
            syncState();
        }
    };
    this.executor.scheduleAtFixedRate(registrationRunnable, CONSUL_UPDATE_INTERVAL, CONSUL_UPDATE_INTERVAL,
            TimeUnit.SECONDS);
}

From source file: org.wso2.carbon.cluster.coordinator.rdbms.RDBMSCoordinationStrategy.java

public RDBMSCoordinationStrategy() {
    ThreadFactory namedThreadFactory = new ThreadFactoryBuilder().setNameFormat("RDBMSCoordinationStrategy-%d")
            .build();
    this.threadExecutor = Executors
            .newScheduledThreadPool(
                    CoordinationStrategyConfiguration.getInstance().getRdbmsConfigs()
                            .get(CoordinationPropertyNames.RDBMS_BASED_PERFORM_TASK_THREAD_COUNT),
                    namedThreadFactory);
    this.heartBeatInterval = CoordinationStrategyConfiguration.getInstance().getRdbmsConfigs()
            .get(CoordinationPropertyNames.RDBMS_BASED_COORDINATION_HEARTBEAT_INTERVAL);
    // Maximum age of a heartbeat. After this much time, the heartbeat is considered invalid and the node is
    // considered to have left the cluster.
    this.heartbeatMaxAge = heartBeatInterval * 2;
    this.localNodeId = generateRandomId();
    this.rdbmsMemberEventProcessor = new RDBMSMemberEventProcessor(localNodeId);
    this.communicationBusContext = new RDBMSCommunicationBusContextImpl();
}

From source file: pt.lsts.neptus.util.logdownload.LogsDownloaderWorkerUtil.java

/**
 * Creates a {@link ScheduledThreadPoolExecutor} for use on {@link LogsDownloaderWorker}.
 *
 * @param caller the {@link LogsDownloaderWorker} the pool is created for (used to name its threads)
 * @return a 4-thread scheduled executor whose worker threads are daemon threads
 */
static ScheduledThreadPoolExecutor createThreadPool(LogsDownloaderWorker caller) {
    ScheduledThreadPoolExecutor ret = (ScheduledThreadPoolExecutor) Executors.newScheduledThreadPool(4,
            new ThreadFactory() {
                private ThreadGroup group;
                private long count = 0;
                {
                    SecurityManager s = System.getSecurityManager();
                    group = (s != null) ? s.getThreadGroup() : Thread.currentThread().getThreadGroup();
                }

                @Override
                public Thread newThread(Runnable r) {
                    Thread t = new Thread(group, r);
                    t.setName(caller.getClass().getSimpleName() + "::" + Integer.toHexString(caller.hashCode())
                            + "::" + count++);
                    t.setDaemon(true);
                    return t;
                }
            });

    return ret;
}

From source file: org.wso2.carbon.deployment.synchronizer.internal.DeploymentSynchronizationManager.java

/**
 * Initialize the RepositoryManager instance. The RepositoryManager must be initialized by
 * calling this method before synchronizers can use it to schedule tasks.
 *
 * @param serverConfig Active Carbon ServerConfiguration
 */
void init(ServerConfiguration serverConfig) {
    if (log.isDebugEnabled()) {
        log.debug("Initializing deployment synchronization manager");
    }

    int poolSize = DeploymentSynchronizerConstants.DEFAULT_POOL_SIZE;
    String value = serverConfig.getFirstProperty(DeploymentSynchronizerConstants.POOL_SIZE);
    if (value != null) {
        poolSize = Integer.parseInt(value);
    }

    repositoryTaskExecutor = Executors.newScheduledThreadPool(poolSize, new SimpleThreadFactory());
}

From source file: org.compass.core.executor.concurrent.ConcurrentExecutorManager.java

public void configure(CompassSettings settings) throws CompassException {
    int corePoolSize = settings.getSettingAsInt(CompassEnvironment.ExecutorManager.Concurrent.CORE_POOL_SIZE,
            10);
    int maximumPoolSize = settings
            .getSettingAsInt(CompassEnvironment.ExecutorManager.Concurrent.MAXIMUM_POOL_SIZE, 50);
    long keepAliveTime = settings
            .getSettingAsTimeInMillis(CompassEnvironment.ExecutorManager.Concurrent.KEEP_ALIVE_TIME, 60000);

    executorService = ScalingExecutros.newScalingThreadPool(corePoolSize, maximumPoolSize, keepAliveTime,
            new NamedThreadFactory("Compass Executor Thread", true));

    if (log.isDebugEnabled()) {
        log.debug("Using concurrent executor manager with core size [" + corePoolSize + "], max size ["
                + maximumPoolSize + "], and keep alive time [" + keepAliveTime + "ms]");
    }

    int scheduledCorePoolSize = settings
            .getSettingAsInt(CompassEnvironment.ExecutorManager.Concurrent.SCHEDULED_CORE_POOL_SIZE, 1);
    scheduledExecutorService = Executors.newScheduledThreadPool(scheduledCorePoolSize,
            new NamedThreadFactory("Compass Scheduled Executor Thread", true));

    if (log.isDebugEnabled()) {
        log.debug("Using concurrent executor manager scheduler with size [" + scheduledCorePoolSize + "]");
    }
}