Example usage for java.util.concurrent Executors newSingleThreadScheduledExecutor

Introduction

On this page you can find example usage for java.util.concurrent Executors newSingleThreadScheduledExecutor.

Prototype

public static ScheduledExecutorService newSingleThreadScheduledExecutor(ThreadFactory threadFactory) 

Document

Creates a single-threaded executor that can schedule commands to run after a given delay, or to execute periodically.
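
A minimal, self-contained sketch of how this overload is typically used (the class and thread names here are illustrative, not taken from the examples below): a custom ThreadFactory names the worker thread and marks it as a daemon, and the returned ScheduledExecutorService runs a task at a fixed rate.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;

public class HeartbeatExample {
    public static void main(String[] args) throws InterruptedException {
        ThreadFactory factory = new ThreadFactory() {
            public Thread newThread(Runnable r) {
                Thread t = new Thread(r, "heartbeat-scheduler");
                t.setDaemon(true); // daemon thread: does not keep the JVM alive
                return t;
            }
        };
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor(factory);

        // Print a heartbeat every 2 seconds after an initial 2-second delay.
        scheduler.scheduleAtFixedRate(new Runnable() {
            public void run() {
                System.out.println("heartbeat");
            }
        }, 2, 2, TimeUnit.SECONDS);

        Thread.sleep(7000); // let a few runs happen before the demo exits
        scheduler.shutdown();
    }
}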

Usage

From source file:com.taobao.pushit.server.listener.ConnectionNumberListener.java

public ConnectionNumberListener(int connThreshold, int ipCountThreshold, int ipCheckTaskInterval) {
    this.connThreshold = connThreshold;
    this.ipCountThreshold = ipCountThreshold;
    this.ipCheckTaskInterval = ipCheckTaskInterval;
    this.scheduler = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() {

        public Thread newThread(Runnable r) {
            Thread t = new Thread(r);
            t.setName("connection num control thread");
            t.setDaemon(true);
            return t;
        }
    });

    this.scheduler.scheduleAtFixedRate(new Runnable() {

        public void run() {
            int ipCount = ConnectionNumberListener.this.connectionIpNumMap.size();
            if (ipCount >= ConnectionNumberListener.this.ipCountThreshold) {
                log.warn("IP, IP, IP=" + ipCount + ", ="
                        + ConnectionNumberListener.this.ipCountThreshold);
                isOverflow = true;
            } else {
                isOverflow = false;
            }
        }

    }, this.ipCheckTaskInterval, this.ipCheckTaskInterval, TimeUnit.SECONDS);
}

From source file:com.garethahealy.jolokiajvmhawkular.core.EmbeddedHawkularMetricsAgent.java

public static void premain(String agentArgs) {
    CustomJvmAgent.premain(agentArgs);

    try {
        LOG.info("About to load BackendManager...");

        Field jolokiaHttpHandlerField = FieldUtils.getDeclaredField(server.getClass(), "jolokiaHttpHandler",
                true);
        JolokiaHttpHandler jolokiaHttpHandler = (JolokiaHttpHandler) jolokiaHttpHandlerField.get(server);

        Field backendManagerField = FieldUtils.getDeclaredField(jolokiaHttpHandler.getClass(), "backendManager",
                true);
        BackendManager backendManager = (BackendManager) backendManagerField.get(jolokiaHttpHandler);

        HawkularClient client = getHawkularClient();
        if (client == null) {
            LOG.error(
                    "HawkularClient is null. Not starting HawkularMetricsService via ScheduledExecutorService.");
        } else {
            BasicThreadFactory factory = new BasicThreadFactory.Builder().namingPattern("hawkular-metrics-%d")
                    .daemon(true).priority(Thread.MAX_PRIORITY).build();

            ScheduledExecutorService exec = Executors.newSingleThreadScheduledExecutor(factory);
            exec.scheduleAtFixedRate(
                    new HawkularMetricsRunnable(new HawkularMetricsService(backendManager, client)), 15, 15,
                    TimeUnit.SECONDS);

            LOG.info("Started HawkularMetricsService via ScheduledExecutorService.");
        }
    } catch (IllegalAccessException e) {
        LOG.error("{}", e);
    }
}

From source file:com.linkedin.pinot.controller.helix.core.SegmentDeletionManager.java

SegmentDeletionManager(String localDiskDir, HelixAdmin helixAdmin, String helixClusterName,
        ZkHelixPropertyStore<ZNRecord> propertyStore) {
    _localDiskDir = localDiskDir;
    _helixAdmin = helixAdmin;
    _helixClusterName = helixClusterName;
    _propertyStore = propertyStore;

    _executorService = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() {
        @Override
        public Thread newThread(Runnable runnable) {
            Thread thread = new Thread(runnable);
            thread.setName("PinotHelixResourceManagerExecutorService");
            return thread;
        }
    });
}

From source file:com.googlecode.concurrentlinkedhashmap.MemoryLeakTest.java

@BeforeMethod
public void beforeMemoryLeakTest() {
    ThreadFactory threadFactory = new ThreadFactoryBuilder().setPriority(Thread.MAX_PRIORITY).setDaemon(true)
            .build();
    statusExecutor = Executors.newSingleThreadScheduledExecutor(threadFactory);
    statusExecutor.scheduleAtFixedRate(newStatusTask(), statusInterval, statusInterval, SECONDS);
    map = new Builder<Long, Long>().maximumWeightedCapacity(threads).build();
}

From source file:rapture.series.children.cleanup.DefaultFolderCleanupService.java

DefaultFolderCleanupService() {
    repoIdToInfo = new HashMap<>();
    executor = Executors.newSingleThreadScheduledExecutor(
            new ThreadFactoryBuilder().setNameFormat("FolderCleanup").setDaemon(true).build());
    Integer initialDelay = ConfigLoader.getConf().folderCleanup.initialDelay;
    Integer delay = ConfigLoader.getConf().folderCleanup.delay;
    executor.scheduleWithFixedDelay(new Runnable() {
        @Override
        public void run() {
            try {
                runCleanup();
            } catch (Exception e) {
                log.error(ExceptionToString.format(e));
            }
        }
    }, initialDelay, delay, TimeUnit.MILLISECONDS);
}

From source file:org.eclipse.jubula.rc.javafx.util.concurrent.JBExecutors.java

/**
 * @param poolName The prefix to use for naming worker threads. 
 *                 May not be <code>null</code>.
 * @return the newly created single-threaded daemon 
 *         {@link ScheduledExecutorService}.
 */
public static ScheduledExecutorService newSingleDaemonThreadScheduledExecutor(String poolName) {

    return Executors.newSingleThreadScheduledExecutor(daemonThreadFactory(poolName));
}
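
The daemonThreadFactory(poolName) helper is not shown in this snippet; a minimal sketch of what such a factory could look like (hypothetical names, not the actual Jubula implementation) is:

import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;

final class DaemonThreadFactory implements ThreadFactory {

    private final String poolName;
    private final AtomicInteger count = new AtomicInteger(1);

    DaemonThreadFactory(String poolName) {
        this.poolName = poolName;
    }

    public Thread newThread(Runnable r) {
        Thread t = new Thread(r, poolName + "-" + count.getAndIncrement());
        t.setDaemon(true); // daemon threads do not block JVM shutdown
        return t;
    }
}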

From source file:org.wso2.carbon.cluster.coordinator.rdbms.RDBMSMemberEventProcessor.java

public RDBMSMemberEventProcessor(String nodeId) {
    this.communicationBusContext = new RDBMSCommunicationBusContextImpl();
    ThreadFactory namedThreadFactory = new ThreadFactoryBuilder().setNameFormat("ClusterEventReaderTask-%d")
            .build();
    this.clusterMembershipReaderTaskScheduler = Executors.newSingleThreadScheduledExecutor(namedThreadFactory);
    addNewListenerTask(nodeId);
}

From source file:org.compass.core.lucene.engine.manager.ScheduledLuceneSearchEngineIndexManager.java

public void start() {
    indexManager.start();

    if (settings.getIndexManagerScheduleInterval() < 0) {
        log.info("Not starting scheduled index manager");
        return;
    }

    if (log.isInfoEnabled()) {
        log.info("Starting scheduled index manager with period [" + settings.getIndexManagerScheduleInterval()
                + "ms] daemon [true]");
    }
    scheduledExecutorService = Executors.newSingleThreadScheduledExecutor(
            new SingleThreadThreadFactory("Compass Scheduled IndexManager", true));
    ScheduledIndexManagerRunnable scheduledIndexManagerRunnable = new ScheduledIndexManagerRunnable(
            indexManager);
    long period = settings.getIndexManagerScheduleInterval();
    scheduledExecutorService.scheduleWithFixedDelay(scheduledIndexManagerRunnable, period, period,
            TimeUnit.MILLISECONDS);

    // set the time to wait for clearing cache to 110% of the schedule time
    setWaitForCacheInvalidationBeforeSecondStep((long) (settings.getIndexManagerScheduleInterval() * 1.1));
}

From source file:com.github.benmanes.caffeine.cache.Stresser.java

public Stresser() {
    ThreadFactory threadFactory = new ThreadFactoryBuilder().setPriority(Thread.MAX_PRIORITY).setDaemon(true)
            .build();
    Executors.newSingleThreadScheduledExecutor(threadFactory).scheduleAtFixedRate(this::status, STATUS_INTERVAL,
            STATUS_INTERVAL, SECONDS);
    cache = Caffeine.newBuilder().maximumSize(operation.maxEntries).recordStats().build(key -> key);
    local = (BoundedLocalCache<Integer, Integer>) cache.asMap();
    ints = new Integer[TOTAL_KEYS];
    Arrays.setAll(ints, key -> {
        cache.put(key, key);
        return key;
    });
    cache.cleanUp();
    stopwatch = Stopwatch.createStarted();
    status();
}

From source file:org.compass.core.lucene.engine.optimizer.ScheduledLuceneSearchEngineOptimizer.java

public synchronized void start() throws SearchEngineException {
    if (isRunning()) {
        throw new IllegalStateException("Optimizer is already running");
    }

    this.optimizer.start();

    CompassSettings settings = getSearchEngineFactory().getSettings();
    boolean daemon = settings.getSettingAsBoolean(LuceneEnvironment.Optimizer.SCHEDULE_DEAMON, true);
    long period = (long) (settings.getSettingAsFloat(LuceneEnvironment.Optimizer.SCHEDULE_PERIOD, 10) * 1000);
    if (log.isInfoEnabled()) {
        log.info("Starting scheduled optimizer [" + optimizer.getClass() + "] with period [" + period
                + "ms] daemon [" + daemon + "]");
    }
    scheduledExecutorService = Executors.newSingleThreadScheduledExecutor(
            new SingleThreadThreadFactory("Compass Scheduled Optimizer", daemon));
    ScheduledOptimizeRunnable scheduledOptimizeRunnable = new ScheduledOptimizeRunnable(
            getSearchEngineFactory().getOptimizer());
    scheduledExecutorService.scheduleWithFixedDelay(scheduledOptimizeRunnable, period, period,
            TimeUnit.MILLISECONDS);
}