Example usage for java.util.concurrent Executors newScheduledThreadPool

Introduction

On this page you can find example usages of java.util.concurrent Executors.newScheduledThreadPool.

Prototype

public static ScheduledExecutorService newScheduledThreadPool(int corePoolSize, ThreadFactory threadFactory) 

Document

Creates a thread pool that can schedule commands to run after a given delay, or to execute periodically.
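
Before the project examples below, here is a minimal, self-contained sketch of the two-argument overload; the class name ScheduledPoolSketch, the thread-name prefix, and the printed messages are illustrative only and do not come from any of the projects listed under Usage.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class ScheduledPoolSketch {
    public static void main(String[] args) throws InterruptedException {
        // Factory that gives each pool thread a readable name.
        AtomicInteger counter = new AtomicInteger();
        ThreadFactory namedFactory = runnable ->
                new Thread(runnable, "scheduled-worker-" + counter.incrementAndGet());

        // Two core threads, built with the custom factory.
        ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(2, namedFactory);

        // Run once, 1 second from now.
        scheduler.schedule(
                () -> System.out.println(Thread.currentThread().getName() + ": one-shot task"),
                1, TimeUnit.SECONDS);

        // Run repeatedly, every 2 seconds, starting immediately.
        scheduler.scheduleAtFixedRate(
                () -> System.out.println(Thread.currentThread().getName() + ": periodic task"),
                0, 2, TimeUnit.SECONDS);

        // Let the tasks fire a few times, then shut the pool down.
        Thread.sleep(5_000);
        scheduler.shutdown();
    }
}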

Usage

From source file:org.wso2.andes.kernel.slot.SlotMessageCounter.java

private SlotMessageCounter() {

    SLOT_SUBMIT_TIMEOUT = AndesConfigurationManager
            .readValue(AndesConfiguration.PERFORMANCE_TUNING_MAX_SLOT_SUBMIT_DELAY);

    slotWindowSize = AndesConfigurationManager
            .readValue(AndesConfiguration.PERFORMANCE_TUNING_SLOTS_SLOT_WINDOW_SIZE);

    timeOutForMessagesInQueue = AndesConfigurationManager
            .readValue(AndesConfiguration.PERFORMANCE_TUNING_SLOTS_MESSAGE_ACCUMULATION_TIMEOUT);

    slotCoordinator = MessagingEngine.getInstance().getSlotCoordinator();

    messageStoresUnavailable = false;
    FailureObservingStoreManager.registerStoreHealthListener(this);

    ThreadFactory namedThreadFactory = new ThreadFactoryBuilder().setNameFormat("SlotMessageCounterTimeoutTask")
            .build();
    submitSlotToCoordinatorExecutor = Executors.newScheduledThreadPool(2, namedThreadFactory);
}

From source file:com.spotify.helios.serviceregistration.skydns.SkyDnsServiceRegistrar.java

/**
 * @param etcdClient client to talk to etcd with.
 * @param timeToLiveSeconds how long entries in the discovery service should live.
 * @param format the hostname format.
 */
public SkyDnsServiceRegistrar(final MiniEtcdClient etcdClient, final int timeToLiveSeconds,
        final String format) {
    this.etcdClient = Preconditions.checkNotNull(etcdClient);
    this.timeToLiveSeconds = timeToLiveSeconds;
    this.handles = Maps.newConcurrentMap();

    this.executor = MoreExecutors.getExitingScheduledExecutorService(
            (ScheduledThreadPoolExecutor) Executors.newScheduledThreadPool(1, threadFactory), 0, SECONDS);

    // Dividing into thirds, since at least halves are necessary to ensure that the item doesn't
    // expire due to a slight delay, and went to thirds so that a single failure won't tank the
    // registration
    this.executor.scheduleAtFixedRate(registrationRunnable, timeToLiveSeconds / 3, timeToLiveSeconds / 3,
            SECONDS);
    this.srvFormat = format;
}

From source file:org.apache.phoenix.trace.TraceWriter.java

public void start() {

    traceSpanReceiver = getTraceSpanReceiver();
    if (traceSpanReceiver == null) {
        LOG.warn("No receiver has been initialized for TraceWriter. Traces will not be written.");
        LOG.warn("Restart Phoenix to try again.");
        return;
    }

    ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
    builder.setDaemon(true).setNameFormat("PHOENIX-METRICS-WRITER");
    executor = Executors.newScheduledThreadPool(this.numThreads, builder.build());

    for (int i = 0; i < this.numThreads; i++) {
        executor.scheduleAtFixedRate(new FlushMetrics(), 0, 10, TimeUnit.SECONDS);
    }

    LOG.info("Writing tracing metrics to phoenix table");
}

From source file:edu.umass.cs.nio.AbstractPacketDemultiplexer.java

/**
 *
 * @param threadPoolSize
 *            Refer documentation for {@link #setThreadPoolSize(int)
 *            setThreadPoolsize(int)}.
 */
public AbstractPacketDemultiplexer(int threadPoolSize) {
    this.executor = (ScheduledThreadPoolExecutor) Executors.newScheduledThreadPool(threadPoolSize,
            new ThreadFactory() {
                @Override
                public Thread newThread(Runnable r) {
                    Thread thread = Executors.defaultThreadFactory().newThread(r);
                    thread.setName(threadName);
                    return thread;
                }
            });
    this.myThreadPoolSize = threadPoolSize;
}

From source file:org.opendaylight.genius.utils.batching.ResourceBatchingManager.java

public void registerBatchableResource(String resourceType, final BlockingQueue<ActionableResource> resQueue,
        final ResourceHandler resHandler) {
    Preconditions.checkNotNull(resQueue, "ResourceQueue to use for batching cannot not be null.");
    Preconditions.checkNotNull(resHandler, "ResourceHandler cannot not be null.");
    if (resourceHandlerMapper.contains(resourceType)) {
        throw new RuntimeException("Resource type already registered");
    }
    resourceHandlerMapper.put(resourceType, new ImmutablePair<>(resQueue, resHandler));
    ScheduledThreadPoolExecutor resDelegatorService = (ScheduledThreadPoolExecutor) Executors
            .newScheduledThreadPool(1, ThreadFactoryProvider.builder().namePrefix("ResourceBatchingManager")
                    .logger(LOG).build().get());
    resourceBatchingThreadMapper.put(resourceType, resDelegatorService);
    LOG.info("Registered resourceType {} with batchSize {} and batchInterval {}", resourceType,
            resHandler.getBatchSize(), resHandler.getBatchInterval());
    if (resDelegatorService.getPoolSize() == 0) {
        resDelegatorService.scheduleWithFixedDelay(new Batcher(resourceType), INITIAL_DELAY,
                resHandler.getBatchInterval(), TIME_UNIT);
    }
}

From source file:ai.grakn.engine.tasks.manager.multiqueue.Scheduler.java

public Scheduler(TaskStateStorage storage, ZookeeperConnection connection) {
    this.storage = storage;

    if (OPENED.compareAndSet(false, true)) {
        // Kafka listener
        consumer = kafkaConsumer(SCHEDULERS_GROUP);

        // Configure callback for a Kafka rebalance
        consumer.subscribe(singletonList(NEW_TASKS_TOPIC),
                rebalanceListener(consumer, new ExternalOffsetStorage(connection)));

        // Kafka writer
        producer = kafkaProducer();

        waitToClose = new CountDownLatch(1);

        ThreadFactory namedThreadFactory = new ThreadFactoryBuilder().setNameFormat("scheduler-pool-%d")
                .build();
        schedulingService = Executors.newScheduledThreadPool(SCHEDULER_THREADS, namedThreadFactory);

        LOG.debug("Scheduler started");
    } else {
        LOG.error("Scheduler already opened!");
    }
}

From source file:org.apache.hadoop.hbase.replication.regionserver.Replication.java

public void initialize(final Server server, final FileSystem fs, final Path logDir, final Path oldLogDir)
        throws IOException {
    this.server = server;
    this.conf = this.server.getConfiguration();
    this.replication = isReplication(this.conf);
    this.scheduleThreadPool = Executors.newScheduledThreadPool(1,
            new ThreadFactoryBuilder()
                    .setNameFormat(server.getServerName().toShortString() + "Replication Statistics #%d")
                    .setDaemon(true).build());
    if (replication) {
        try {
            this.replicationQueues = ReplicationFactory.getReplicationQueues(server.getZooKeeper(), this.conf,
                    this.server);
            this.replicationQueues.init(this.server.getServerName().toString());
            this.replicationPeers = ReplicationFactory.getReplicationPeers(server.getZooKeeper(), this.conf,
                    this.server);
            this.replicationPeers.init();
            this.replicationTracker = ReplicationFactory.getReplicationTracker(server.getZooKeeper(),
                    this.replicationPeers, this.conf, this.server, this.server);
        } catch (ReplicationException e) {
            throw new IOException("Failed replication handler create", e);
        }
        UUID clusterId = null;
        try {
            clusterId = ZKClusterId.getUUIDForCluster(this.server.getZooKeeper());
        } catch (KeeperException ke) {
            throw new IOException("Could not read cluster id", ke);
        }
        this.replicationManager = new ReplicationSourceManager(replicationQueues, replicationPeers,
                replicationTracker, conf, this.server, fs, logDir, oldLogDir, clusterId);
        this.statsThreadPeriod = this.conf.getInt("replication.stats.thread.period.seconds", 5 * 60);
        LOG.debug("ReplicationStatisticsThread " + this.statsThreadPeriod);
    } else {
        this.replicationManager = null;
        this.replicationQueues = null;
        this.replicationPeers = null;
        this.replicationTracker = null;
    }
}

From source file:org.msec.sink.es.ESSink.java

@Override
public void start() {
    super.start();

    initESThreadPool();

    rollService = Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder()
            .setNameFormat("ESSink-Bulk-Timer" + Thread.currentThread().getId() + "-%d").build());

    rollService.scheduleAtFixedRate(new Runnable() {
        @Override
        public void run() {
            //LOG.debug("Marking time to bulk");
            bulkTimeout = true;
        }
    }, 3, 3, TimeUnit.SECONDS);

    LOG.info("ESSink {} started.", getName());
}

From source file:org.apache.hadoop.hive.llap.daemon.impl.QueryTracker.java

public QueryTracker(Configuration conf, String[] localDirsBase, String clusterId) {
    super("QueryTracker");
    this.localDirsBase = localDirsBase;
    this.clusterId = clusterId;
    try {
        localFs = FileSystem.getLocal(conf);
    } catch (IOException e) {
        throw new RuntimeException("Failed to setup local filesystem instance", e);
    }

    this.defaultDeleteDelaySeconds = HiveConf.getTimeVar(conf, ConfVars.LLAP_FILE_CLEANUP_DELAY_SECONDS,
            TimeUnit.SECONDS);

    int numCleanerThreads = HiveConf.getIntVar(conf, ConfVars.LLAP_DAEMON_NUM_FILE_CLEANER_THREADS);
    this.executorService = Executors.newScheduledThreadPool(numCleanerThreads,
            new ThreadFactoryBuilder().setDaemon(true).setNameFormat("QueryFileCleaner %d").build());
}

From source file:io.pravega.controller.server.ControllerServiceStarter.java

@Override
protected void startUp() {
    long traceId = LoggerHelpers.traceEnterWithContext(log, this.objectId, "startUp");
    log.info("Initiating controller service startUp");
    log.info("Event processors enabled = {}", serviceConfig.getEventProcessorConfig().isPresent());
    log.info("Cluster listener enabled = {}", serviceConfig.isControllerClusterListenerEnabled());
    log.info("    Host monitor enabled = {}", serviceConfig.getHostMonitorConfig().isHostMonitorEnabled());
    log.info("     gRPC server enabled = {}", serviceConfig.getGRPCServerConfig().isPresent());
    log.info("     REST server enabled = {}", serviceConfig.getRestServerConfig().isPresent());

    final StreamMetadataStore streamStore;
    final TaskMetadataStore taskMetadataStore;
    final HostControllerStore hostStore;
    final CheckpointStore checkpointStore;

    try {
        //Initialize the executor service.
        controllerExecutor = Executors.newScheduledThreadPool(serviceConfig.getThreadPoolSize(),
                new ThreadFactoryBuilder().setNameFormat("controllerpool-%d").build());

        log.info("Creating the stream store");
        streamStore = StreamStoreFactory.createStore(storeClient, controllerExecutor);

        log.info("Creating the task store");
        taskMetadataStore = TaskStoreFactory.createStore(storeClient, controllerExecutor);

        log.info("Creating the host store");
        hostStore = HostStoreFactory.createStore(serviceConfig.getHostMonitorConfig(), storeClient);

        log.info("Creating the checkpoint store");
        checkpointStore = CheckpointStoreFactory.create(storeClient);

        // On each controller process restart, we use a fresh hostId,
        // which is a combination of hostname and random GUID.
        String hostName = getHostName();
        Host host = new Host(hostName, getPort(), UUID.randomUUID().toString());

        if (serviceConfig.getHostMonitorConfig().isHostMonitorEnabled()) {
            //Start the Segment Container Monitor.
            monitor = new SegmentContainerMonitor(hostStore, (CuratorFramework) storeClient.getClient(),
                    new UniformContainerBalancer(),
                    serviceConfig.getHostMonitorConfig().getHostMonitorMinRebalanceInterval());
            log.info("Starting segment container monitor");
            monitor.startAsync();
        }

        connectionFactory = new ConnectionFactoryImpl(false);
        SegmentHelper segmentHelper = new SegmentHelper();

        streamMetadataTasks = new StreamMetadataTasks(streamStore, hostStore, taskMetadataStore, segmentHelper,
                controllerExecutor, host.getHostId(), connectionFactory);
        streamTransactionMetadataTasks = new StreamTransactionMetadataTasks(streamStore, hostStore,
                segmentHelper, controllerExecutor, host.getHostId(), connectionFactory);

        // Controller has a mechanism to track the currently active controller host instances. On detecting a failure of
        // any controller instance, the failure detector stores the failed HostId in a failed hosts directory (FH), and
        // invokes the taskSweeper.sweepOrphanedTasks for each failed host. When all resources under the failed hostId
        // are processed and deleted, that failed HostId is removed from FH folder.
        // Moreover, on controller process startup, it detects any hostIds not in the currently active set of
        // controllers and starts sweeping tasks orphaned by those hostIds.
        TaskSweeper taskSweeper = new TaskSweeper(taskMetadataStore, host.getHostId(), controllerExecutor,
                streamMetadataTasks);

        TxnSweeper txnSweeper = new TxnSweeper(streamStore, streamTransactionMetadataTasks,
                serviceConfig.getTimeoutServiceConfig().getMaxLeaseValue(), controllerExecutor);

        if (serviceConfig.isControllerClusterListenerEnabled()) {
            cluster = new ClusterZKImpl((CuratorFramework) storeClient.getClient(), ClusterType.CONTROLLER);
        }

        controllerService = new ControllerService(streamStore, hostStore, streamMetadataTasks,
                streamTransactionMetadataTasks, new SegmentHelper(), controllerExecutor, cluster);

        // Setup event processors.
        setController(new LocalController(controllerService));

        if (serviceConfig.getEventProcessorConfig().isPresent()) {
            // Create ControllerEventProcessor object.
            controllerEventProcessors = new ControllerEventProcessors(host.getHostId(),
                    serviceConfig.getEventProcessorConfig().get(), localController, checkpointStore,
                    streamStore, hostStore, segmentHelper, connectionFactory, streamMetadataTasks,
                    controllerExecutor);

            // Bootstrap and start it asynchronously.
            log.info("Starting event processors");
            controllerEventProcessors.bootstrap(streamTransactionMetadataTasks, streamMetadataTasks)
                    .thenAcceptAsync(x -> controllerEventProcessors.startAsync(), controllerExecutor);
        }

        // Setup and start controller cluster listener after all sweepers have been initialized.
        if (serviceConfig.isControllerClusterListenerEnabled()) {
            List<FailoverSweeper> failoverSweepers = new ArrayList<>();
            failoverSweepers.add(taskSweeper);
            failoverSweepers.add(txnSweeper);
            if (serviceConfig.getEventProcessorConfig().isPresent()) {
                assert controllerEventProcessors != null;
                failoverSweepers.add(controllerEventProcessors);
            }

            controllerClusterListener = new ControllerClusterListener(host, cluster, controllerExecutor,
                    failoverSweepers);

            log.info("Starting controller cluster listener");
            controllerClusterListener.startAsync();
        }

        // Start RPC server.
        if (serviceConfig.getGRPCServerConfig().isPresent()) {
            grpcServer = new GRPCServer(controllerService, serviceConfig.getGRPCServerConfig().get());
            grpcServer.startAsync();
            log.info("Awaiting start of rpc server");
            grpcServer.awaitRunning();
        }

        // Start REST server.
        if (serviceConfig.getRestServerConfig().isPresent()) {
            restServer = new RESTServer(controllerService, serviceConfig.getRestServerConfig().get());
            restServer.startAsync();
            log.info("Awaiting start of REST server");
            restServer.awaitRunning();
        }

        // Wait for controller event processors to start.
        if (serviceConfig.getEventProcessorConfig().isPresent()) {
            log.info("Awaiting start of controller event processors");
            controllerEventProcessors.awaitRunning();
        }

        // Wait for controller cluster listeners to start.
        if (serviceConfig.isControllerClusterListenerEnabled()) {
            log.info("Awaiting start of controller cluster listener");
            controllerClusterListener.awaitRunning();
        }
    } finally {
        LoggerHelpers.traceLeave(log, this.objectId, "startUp", traceId);
    }
}