Example usage for java.util.concurrent Executors newSingleThreadScheduledExecutor

Introduction

On this page you can find example usages of java.util.concurrent Executors.newSingleThreadScheduledExecutor(ThreadFactory), taken from open source projects.

Prototype

public static ScheduledExecutorService newSingleThreadScheduledExecutor(ThreadFactory threadFactory) 

Document

Creates a single-threaded executor that can schedule commands to run after a given delay, or to execute periodically.
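
As a quick reference, here is a minimal, self-contained sketch of how this overload is typically used; it is not drawn from any of the projects below, and the class and thread names are illustrative only. A ThreadFactory is supplied so the scheduler thread gets a recognizable name, a task is scheduled at a fixed rate, and the executor is shut down once it is no longer needed.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class SingleThreadScheduledExample {
    public static void main(String[] args) throws InterruptedException {
        // Name the scheduler thread via the ThreadFactory overload.
        ScheduledExecutorService scheduler = Executors
                .newSingleThreadScheduledExecutor(r -> new Thread(r, "example-scheduler"));

        // Run a task every second after an initial one-second delay.
        scheduler.scheduleAtFixedRate(() -> System.out.println("tick"), 1, 1, TimeUnit.SECONDS);

        // Let a few ticks go by, then stop the executor cleanly.
        Thread.sleep(3500);
        scheduler.shutdown();
        scheduler.awaitTermination(5, TimeUnit.SECONDS);
    }
}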

Usage

From source file:io.openmessaging.rocketmq.consumer.LocalMessageCache.java

LocalMessageCache(final DefaultMQPullConsumer rocketmqPullConsumer, final ClientConfig clientConfig) {
    consumeRequestCache = new LinkedBlockingQueue<>(clientConfig.getRmqPullMessageCacheCapacity());
    this.consumedRequest = new ConcurrentHashMap<>();
    this.pullOffsetTable = new ConcurrentHashMap<>();
    this.rocketmqPullConsumer = rocketmqPullConsumer;
    this.clientConfig = clientConfig;
    this.cleanExpireMsgExecutors = Executors
            .newSingleThreadScheduledExecutor(new ThreadFactoryImpl("OMS_CleanExpireMsgScheduledThread_"));
}

From source file:io.gravitee.gateway.services.monitoring.MonitoringService.java

@Override
protected void doStart() throws Exception {
    if (enabled) {
        super.doStart();
        LOGGER.info("Start gateway monitor");

        Event evt = prepareEvent();
        LOGGER.debug("Sending a {} event", evt.getType());
        heartbeatEvent = eventRepository.create(evt);

        executorService = Executors.newSingleThreadScheduledExecutor(r -> new Thread(r, "gateway-monitor"));

        MonitorThread monitorThread = new MonitorThread(heartbeatEvent);
        this.applicationContext.getAutowireCapableBeanFactory().autowireBean(monitorThread);

        LOGGER.info("Monitoring scheduled with fixed delay {} {} ", delay, unit.name());

        ((ScheduledExecutorService) executorService).scheduleWithFixedDelay(monitorThread, 0, delay, unit);

        LOGGER.info("Start gateway monitor : DONE");
    }
}

From source file:com.uber.stream.kafka.chaperone.collector.KafkaMonitor.java

public KafkaMonitor(long checkIntervalInSec, String brokerList, String auditTopics,
        IAuditReporter auditReporter) {
    this.checkIntervalInSec = checkIntervalInSec;
    this.brokerList = Arrays.asList(StringUtils.split(brokerList, ","));
    this.auditTopics = Arrays.asList(StringUtils.split(auditTopics, ","));
    this.auditReporter = auditReporter;
    this.brokerConsumer = new HashMap<>();
    this.partitionLeader = new HashMap<>();
    this.partitionLag = new ConcurrentHashMap<>();
    this.partitionInjected = new HashSet<>();

    cronExecutor = Executors.newSingleThreadScheduledExecutor(
            new ThreadFactoryBuilder().setNameFormat("kafka-monitor-%d").build());
}

From source file:com.adaptris.core.jmx.JmxNotificationConsumer.java

@Override
public void init() throws CoreException {
    try {
        scheduler = Executors
                .newSingleThreadScheduledExecutor(new ManagedThreadFactory(getClass().getSimpleName()));
        connection = retrieveConnection(JmxConnection.class).mbeanServerConnection();
        actualObjectName = ObjectName.getInstance(getDestination().getDestination());
    } catch (Exception e) {
        throw ExceptionHelper.wrapCoreException(e);
    }
}

From source file:org.apache.synapse.commons.beanstalk.enterprise.EnterpriseBeanstalkManager.java

/**
 * Initializes the beanstalk manager, which creates and initializes the beanstalks defined in
 * the given Properties instance.
 * @param props Properties to read beanstalk configurations from. Usually, source of this is
 * synapse.properties file.
 */
public void init(Properties props) {

    if (props == null) {
        if (log.isDebugEnabled()) {
            log.debug("Beanstalk properties cannot be found.");
        }
        return;
    }

    String beanstalkNameList = MiscellaneousUtil.getProperty(props,
            EnterpriseBeanstalkConstants.SYNAPSE_BEANSTALK_PREFIX, null);

    if (beanstalkNameList == null || "".equals(beanstalkNameList)) {
        if (log.isDebugEnabled()) {
            log.debug("No beanstalks defined for initialization.");
        }
        return;
    }

    String[] beanstalkNames = beanstalkNameList.split(",");
    if (beanstalkNames == null || beanstalkNames.length == 0) {
        if (log.isDebugEnabled()) {
            log.debug("No beanstalk definitions found for initialization.");
        }
        return;
    }

    scheduler = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() {
        public Thread newThread(Runnable r) {
            return new Thread(r, "enterprise-beanstalk-cleaner");
        }
    });

    for (String beanstalkName : beanstalkNames) {

        if (beanstalkName == null || beanstalkName.trim().length() == 0) {
            continue;
        }

        String propertyPrefix = EnterpriseBeanstalkConstants.SYNAPSE_BEANSTALK_PREFIX + "." + beanstalkName
                + ".";
        Properties currentBeanstalkProps = new Properties();

        for (Map.Entry<Object, Object> entry : props.entrySet()) {
            if (entry.getKey() instanceof String && entry.getValue() instanceof String) {
                String key = (String) entry.getKey();
                if (key.startsWith(propertyPrefix)) {
                    currentBeanstalkProps.setProperty(key.replace(propertyPrefix, ""),
                            (String) entry.getValue());
                }
            }
        }

        EnterpriseBeanstalk beanstalk = new EnterpriseBeanstalk(beanstalkName, currentBeanstalkProps,
                scheduler);
        beanstalk.init();
        beanstalkMap.put(beanstalkName, beanstalk);
    }
}

From source file:org.alfresco.repo.lock.JobLockServiceImpl.java

public JobLockServiceImpl() {
    defaultRetryWait = 20;
    defaultRetryCount = 10;
    txnListener = new LockTransactionListener();

    TraceableThreadFactory threadFactory = new TraceableThreadFactory();
    threadFactory.setThreadDaemon(false);
    threadFactory.setNamePrefix("JobLockService");

    scheduler = Executors.newSingleThreadScheduledExecutor(threadFactory);

    shutdownListener = new VmShutdownListener("JobLockService");
}

From source file:com.astamuse.asta4d.web.util.timeout.DefaultSessionAwareExpirableDataManager.java

@Override
public void start() {
    service = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            return new Thread(r, checkThreadName);
        }
    });

    // start check thread
    service.scheduleAtFixedRate(new Runnable() {
        @Override
        public void run() {
            List<Entry<String, DataHolder>> entries = new ArrayList<>(dataMap.entrySet());
            long currentTime = System.currentTimeMillis();
            int removedCounter = 0;
            Object existing;
            for (Entry<String, DataHolder> entry : entries) {
                if (entry.getValue().isExpired(currentTime)) {
                    existing = dataMap.remove(entry.getKey());
                    if (existing != null) {// we removed it successfully
                        removedCounter++;
                    }
                }
            }
            if (removedCounter > 0) {
                addCount(-removedCounter);
            }
        }
    }, expirationCheckPeriodInMilliseconds, expirationCheckPeriodInMilliseconds, TimeUnit.MILLISECONDS);
}

From source file:org.apache.pulsar.functions.worker.WorkerService.java

public WorkerService(WorkerConfig workerConfig) {
    this.workerConfig = workerConfig;
    this.statsUpdater = Executors
            .newSingleThreadScheduledExecutor(new DefaultThreadFactory("worker-stats-updater"));
    this.executor = Executors.newScheduledThreadPool(10, new DefaultThreadFactory("pulsar-worker"));
    this.metricsGenerator = new MetricsGenerator(this.statsUpdater, workerConfig);
}

From source file:org.apache.tajo.util.metrics.reporter.TajoMetricsScheduledReporter.java

public void init(MetricRegistry registry, String metricsName, String hostAndPort,
        Map<String, String> metricsProperties) {
    this.registry = registry;
    this.metricsName = metricsName;
    this.executor = Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory(metricsName));
    this.rateFactor = TimeUnit.SECONDS.toSeconds(1);
    this.rateUnit = calculateRateUnit(TimeUnit.MILLISECONDS);
    this.durationFactor = 1.0 / TimeUnit.MILLISECONDS.toNanos(1);
    this.durationUnit = TimeUnit.MILLISECONDS.toString().toLowerCase(Locale.US);
    this.metricsProperties = metricsProperties;
    this.metricsPropertyKey = metricsName + "." + getReporterName() + ".";
    this.hostAndPort = hostAndPort;

    MetricsFilterList filterList = new MetricsFilterList();
    filterList.addMetricFilter(new GroupNameMetricsFilter(metricsName));

    String regexpFilterKey = metricsPropertyKey + "regexp.";
    Set<String> regexpExpressions = new HashSet<>();

    for (Map.Entry<String, String> entry : metricsProperties.entrySet()) {
        String key = entry.getKey();
        if (key.indexOf(regexpFilterKey) == 0) {
            regexpExpressions.add(entry.getValue());
        }
    }

    if (!regexpExpressions.isEmpty()) {
        filterList.addMetricFilter(new RegexpMetricsFilter(regexpExpressions));
    }
    this.filter = filterList;

    this.period = 60;
    if (metricsProperties.get(metricsPropertyKey + PERIOD_KEY) != null) {
        this.period = Integer.parseInt(metricsProperties.get(metricsPropertyKey + PERIOD_KEY));
    }
    afterInit();
}

From source file:gobblin.aws.AWSJobConfigurationManager.java

public AWSJobConfigurationManager(EventBus eventBus, Config config) {
    super(eventBus, config);
    this.jobConfFiles = Maps.newHashMap();
    if (config.hasPath(GobblinAWSConfigurationKeys.JOB_CONF_REFRESH_INTERVAL)) {
        this.refreshIntervalInSeconds = config
                .getDuration(GobblinAWSConfigurationKeys.JOB_CONF_REFRESH_INTERVAL, TimeUnit.SECONDS);
    } else {
        this.refreshIntervalInSeconds = DEFAULT_JOB_CONF_REFRESH_INTERVAL;
    }

    this.fetchJobConfExecutor = Executors.newSingleThreadScheduledExecutor(
            ExecutorsUtils.newThreadFactory(Optional.of(LOGGER), Optional.of("FetchJobConfExecutor")));
}