Example usage for java.util.concurrent Executors newSingleThreadScheduledExecutor

Introduction

On this page you can find example usage for java.util.concurrent Executors newSingleThreadScheduledExecutor.

Prototype

public static ScheduledExecutorService newSingleThreadScheduledExecutor(ThreadFactory threadFactory) 

Document

Creates a single-threaded executor that can schedule commands to run after a given delay, or to execute periodically.
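Before the project examples below, here is a minimal, self-contained sketch (the class name HeartbeatDemo and the thread name "heartbeat-scheduler" are illustrative choices, not taken from any of the quoted projects) showing why the ThreadFactory overload is useful: it lets you name the single worker thread and mark it as a daemon so it does not keep the JVM alive.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class HeartbeatDemo {
    public static void main(String[] args) throws InterruptedException {
        // The ThreadFactory controls how the single worker thread is created:
        // here it is given a descriptive name and marked as a daemon.
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor(r -> {
            Thread t = new Thread(r, "heartbeat-scheduler");
            t.setDaemon(true);
            return t;
        });

        // Run a task every 500 ms, starting immediately.
        scheduler.scheduleAtFixedRate(
                () -> System.out.println("tick on " + Thread.currentThread().getName()),
                0, 500, TimeUnit.MILLISECONDS);

        // Let the periodic task fire a few times, then shut the executor down.
        Thread.sleep(2000);
        scheduler.shutdown();
    }
}

Several of the examples below follow the same pattern: a daemon ThreadFactory combined with scheduleAtFixedRate or scheduleWithFixedDelay for background polling.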

Usage

From source file:org.rhq.enterprise.server.core.plugin.PluginDeploymentScanner.java

public void start() throws Exception {
    // This will check to see if there are any agent plugin records in the database
    // that do not have content associated with them and if so, will stream
    // the content from the file system to the database. This is needed only
    // in the case when this server has recently been upgraded from an old
    // version of the software that did not originally have content stored in the DB.
    // Once we do that, we can start the agent plugin deployer.
    this.agentPluginScanner.fixMissingAgentPluginContent();
    this.agentPluginScanner.getAgentPluginDeployer().start();

    shutdownPoller(); // paranoia - just in case somehow one is still running
    this.poller = Executors.newSingleThreadScheduledExecutor(new LoggingThreadFactory("PluginScanner", true));
    return;
}

From source file:org.madsonic.service.PodcastService.java

public PodcastService() {
    ThreadFactory threadFactory = new ThreadFactory() {
        public Thread newThread(Runnable r) {
            Thread t = Executors.defaultThreadFactory().newThread(r);
            t.setDaemon(true);
            return t;
        }
    };
    refreshExecutor = Executors.newFixedThreadPool(5, threadFactory);
    downloadExecutor = Executors.newFixedThreadPool(4, threadFactory);

    //settingsService.getPodcastEpisodeDownloadLimit()

    scheduledExecutor = Executors.newSingleThreadScheduledExecutor(threadFactory);
}

From source file:org.xdi.oxauth.util.ServerUtil.java

public static ScheduledExecutorService createExecutor() {
    return Executors.newSingleThreadScheduledExecutor(new ThreadFactory() {
        public Thread newThread(Runnable p_r) {
            Thread thread = new Thread(p_r);
            thread.setDaemon(true);
            return thread;
        }
    });
}

From source file:com.yahoo.omid.tso.TSOHandler.java

public void start() {
    this.flushThread = new FlushThread();
    this.scheduledExecutor = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            Thread t = new Thread(Thread.currentThread().getThreadGroup(), r);
            t.setDaemon(true);
            t.setName("Flush Thread");
            return t;
        }
    });
    this.flushFuture = scheduledExecutor.schedule(flushThread, TSOState.FLUSH_TIMEOUT, TimeUnit.MILLISECONDS);
    this.executor = Executors.newSingleThreadExecutor();
}

From source file:com.twitter.hraven.hadoopJobMonitor.HadoopJobMonitorService.java

public void init() {
    YarnConfiguration yConf = new YarnConfiguration();
    DefaultMetricsSystem.initialize("HadoopJobMonitor");
    String logDir = System.getProperty("hadoopJobMonitor.log.dir");
    if (logDir == null)
        logDir = "/tmp";
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    try {
        ObjectName name = new ObjectName(
                "com.twitter.hraven.hadoopJobMonitor.jmx:type=" + WhiteList.class.getSimpleName());
        WhiteList.init(logDir);
        WhiteList mbean = WhiteList.getInstance();
        mbs.registerMBean(mbean, name);
        LOG.error("Current whitelist is: \n" + mbean.getExpirations());
    } catch (Exception e) {
        LOG.fatal("Error in retriving white list from dir " + logDir, e);
    }

    metrics = HadoopJobMonitorMetrics.initSingleton(conf);

    rmDelegate = new ResourceMgrDelegate(yConf);
    clientCache = new ClientCache(conf, rmDelegate);
    AppConfCache.init(conf);
    ProgressCache.init(conf);
    Mail.init(conf);
    Notifier.init(conf);
    clusterCheckerExecutor = Executors
            .newSingleThreadScheduledExecutor(new ClusterStatusChecker.SimpleThreadFactory());
    int concurrentAppCheckers = conf.getInt(HadoopJobMonitorConfiguration.NEW_APP_CHECKER_CONCURRENCY,
            HadoopJobMonitorConfiguration.DEFAULT_NEW_APP_CHECKER_CONCURRENCY);
    appCheckerExecutor = new BlockingExecutor(concurrentAppCheckers,
            new AppStatusChecker.SimpleThreadFactory());
}

From source file:org.rhq.core.system.SigarAccessHandler.java

SigarAccessHandler(SigarFactory sigarFactory) {
    this.sigarFactory = sigarFactory;
    sharedSigarLock = new ReentrantLock();
    localSigarLock = new ReentrantLock();
    scheduledExecutorService = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() {

        private ThreadFactory defaultThreadFactory = Executors.defaultThreadFactory();

        private AtomicInteger threadCounter = new AtomicInteger(0);

        @Override
        public Thread newThread(Runnable runnable) {
            Thread thread = defaultThreadFactory.newThread(runnable);
            thread.setName("SigarAccessHandler-" + threadCounter.incrementAndGet());
            // With daemon threads, there is no need to call #shutdown on the executor to let the JVM go down
            thread.setDaemon(true);
            return thread;
        }
    });
    scheduledExecutorService.scheduleWithFixedDelay(new ThresholdChecker(), 1, 5, MINUTES);
    localSigarInstancesCount = 0;
    closed = false;
}

From source file:io.gravitee.gateway.service.ratelimit.AsyncRateLimitService.java

@Override
protected void doStart() throws Exception {
    super.doStart();

    DefaultListableBeanFactory beanFactory = (DefaultListableBeanFactory) ((ConfigurableApplicationContext) applicationContext)
            .getBeanFactory();
    DefaultListableBeanFactory parentBeanFactory = (DefaultListableBeanFactory) ((ConfigurableApplicationContext) applicationContext
            .getParent()).getBeanFactory();

    // Retrieve the current rate-limit repository implementation
    RateLimitRepository rateLimitRepository = parentBeanFactory.getBean(RateLimitRepository.class);
    LOGGER.debug("Rate-limit repository implementation is {}", rateLimitRepository.getClass().getName());

    if (enabled) {
        // Prepare caches
        RateLimitRepository aggregateCacheRateLimitRepository = new CachedRateLimitRepository(aggregateCache);
        RateLimitRepository localCacheRateLimitRepository = new CachedRateLimitRepository(localCache);

        // Prepare queue to flush data into the final repository implementation
        BlockingQueue<RateLimit> rateLimitsQueue = new BlockingArrayQueue<>(queueCapacity);

        LOGGER.debug("Register rate-limit repository asynchronous implementation {}",
                AsyncRateLimitRepository.class.getName());
        AsyncRateLimitRepository asyncRateLimitRepository = new AsyncRateLimitRepository();
        beanFactory.autowireBean(asyncRateLimitRepository);
        asyncRateLimitRepository.setLocalCacheRateLimitRepository(localCacheRateLimitRepository);
        asyncRateLimitRepository.setAggregateCacheRateLimitRepository(aggregateCacheRateLimitRepository);
        asyncRateLimitRepository.setRateLimitsQueue(rateLimitsQueue);

        LOGGER.info("Register the rate-limit service bridge for synchronous and asynchronous mode");
        DefaultRateLimitService rateLimitService = new DefaultRateLimitService();
        rateLimitService.setRateLimitRepository(rateLimitRepository);
        rateLimitService.setAsyncRateLimitRepository(asyncRateLimitRepository);
        parentBeanFactory.registerSingleton(RateLimitService.class.getName(), rateLimitService);

        // Prepare and start rate-limit poller
        rateLimitPollerExecutor = Executors
                .newSingleThreadScheduledExecutor(r -> new Thread(r, "rate-limit-poller"));
        RateLimitPoller rateLimitPoller = new RateLimitPoller();
        beanFactory.autowireBean(rateLimitPoller);
        rateLimitPoller.setRateLimitRepository(rateLimitRepository);
        rateLimitPoller.setAggregateCacheRateLimitRepository(aggregateCacheRateLimitRepository);

        LOGGER.info("Schedule rate-limit poller at fixed rate: {} {}", polling, TimeUnit.MILLISECONDS);
        rateLimitPollerExecutor.scheduleAtFixedRate(rateLimitPoller, 0L, polling, TimeUnit.MILLISECONDS);

        // Prepare and start rate-limit updater
        rateLimitUpdaterExecutor = Executors.newSingleThreadExecutor(r -> new Thread(r, "rate-limit-updater"));
        RateLimitUpdater rateLimitUpdater = new RateLimitUpdater(rateLimitsQueue);
        beanFactory.autowireBean(rateLimitUpdater);
        rateLimitUpdater.setRateLimitRepository(rateLimitRepository);

        LOGGER.info("Start rate-limit updater");
        rateLimitUpdaterExecutor.submit(rateLimitUpdater);
    } else {
        // By disabling async and cached rate limiting, only the strict mode is allowed
        LOGGER.info("Register the rate-limit service bridge for strict mode only");
        DefaultRateLimitService rateLimitService = new DefaultRateLimitService();
        rateLimitService.setRateLimitRepository(rateLimitRepository);
        rateLimitService.setAsyncRateLimitRepository(rateLimitRepository);
        parentBeanFactory.registerSingleton(RateLimitService.class.getName(), rateLimitService);
    }
}

From source file:org.wso2.andes.kernel.MessageFlusher.java

/**
 * Initialize the delivery filter chain
 */
private void initializeDeliveryResponsibilityComponents() {
    //assign the head of the handler chain
    deliveryResponsibilityHead = new PurgedMessageHandler();
    ExpiredMessageHandler expiredMessageHandler = new ExpiredMessageHandler();
    //link the second handler to the head
    deliveryResponsibilityHead.setNextDeliveryFilter(expiredMessageHandler);
    //link the third handler
    expiredMessageHandler.setNextDeliveryFilter(new DeliveryMessageHandler());

    int preDeliveryDeletionTaskScheduledPeriod = AndesConfigurationManager
            .readValue(AndesConfiguration.PERFORMANCE_TUNING_PRE_DELIVERY_EXPIRY_DELETION_INTERVAL);
    ThreadFactory namedThreadFactory = new ThreadFactoryBuilder().setNameFormat("ExpiryMessageDeletionTask-%d")
            .build();
    //executor service for pre delivery deletion task
    ScheduledExecutorService expiryMessageDeletionTaskScheduler = Executors
            .newSingleThreadScheduledExecutor(namedThreadFactory);
    //pre-delivery deletion task initialization
    PreDeliveryExpiryMessageDeletionTask preDeliveryExpiryMessageDeletionTask = new PreDeliveryExpiryMessageDeletionTask();
    //Set the expiry message deletion task to the expired message handler
    expiredMessageHandler.setExpiryMessageDeletionTask(preDeliveryExpiryMessageDeletionTask);
    //schedule the task at the specified intervals
    expiryMessageDeletionTaskScheduler.scheduleAtFixedRate(preDeliveryExpiryMessageDeletionTask,
            preDeliveryDeletionTaskScheduledPeriod, preDeliveryDeletionTaskScheduledPeriod, TimeUnit.SECONDS);

}

From source file:com.opengamma.component.tool.ToolContextUtils.java

private static ToolContext createToolContextByHttp(String configResourceLocation,
        Class<? extends ToolContext> toolContextClazz, List<String> classifierChain) {
    configResourceLocation = StringUtils.stripEnd(configResourceLocation, "/");
    if (configResourceLocation.endsWith("/jax") == false) {
        configResourceLocation += "/jax";
    }

    // Get the remote component server using the supplied URI
    RemoteComponentServer remoteComponentServer = new RemoteComponentServer(URI.create(configResourceLocation));
    ComponentServer componentServer = remoteComponentServer.getComponentServer();

    // Attempt to build a tool context of the specified type
    ToolContext toolContext;
    try {
        toolContext = toolContextClazz.newInstance();
    } catch (Throwable t) {
        return null;
    }

    // Populate the tool context from the remote component server
    for (MetaProperty<?> metaProperty : toolContext.metaBean().metaPropertyIterable()) {
        if (!metaProperty.name().equals("contextManager")) {
            try {
                ComponentInfo componentInfo = getComponentInfo(componentServer, classifierChain,
                        metaProperty.propertyType());
                if (componentInfo == null) {
                    s_logger.warn("Unable to populate tool context '" + metaProperty.name()
                            + "', no appropriate component found on the server");
                    continue;
                }
                if (ViewProcessor.class.equals(componentInfo.getType())) {
                    final JmsConnector jmsConnector = createJmsConnector(componentInfo);
                    final ScheduledExecutorService scheduler = Executors
                            .newSingleThreadScheduledExecutor(new NamedThreadFactory("rvp"));
                    ViewProcessor vp = new RemoteViewProcessor(componentInfo.getUri(), jmsConnector, scheduler);
                    toolContext.setViewProcessor(vp);
                    toolContext.setContextManager(new Closeable() {
                        @Override
                        public void close() throws IOException {
                            scheduler.shutdownNow();
                            jmsConnector.close();
                        }
                    });
                } else {
                    String clazzName = componentInfo.getAttribute(ComponentInfoAttributes.REMOTE_CLIENT_JAVA);
                    if (clazzName == null) {
                        s_logger.warn("Unable to populate tool context '" + metaProperty.name()
                                + "', no remote access class found");
                        continue;
                    }
                    Class<?> clazz = Class.forName(clazzName);
                    metaProperty.set(toolContext,
                            clazz.getConstructor(URI.class).newInstance(componentInfo.getUri()));
                    s_logger.info("Populated tool context '" + metaProperty.name() + "' with "
                            + metaProperty.get(toolContext));
                }
            } catch (Throwable ex) {
                s_logger.warn(
                        "Unable to populate tool context '" + metaProperty.name() + "': " + ex.getMessage());
            }
        }
    }
    return toolContext;
}

From source file:org.codice.ddf.security.crl.generator.CrlGenerator.java

public CrlGenerator(ClientFactoryFactory factory, EventAdmin eventAdmin) {
    this.factory = factory;
    this.eventAdmin = eventAdmin;
    this.scheduler = Executors
            .newSingleThreadScheduledExecutor(StandardThreadFactoryBuilder.newThreadFactory("crlThread"));
}