Example usage for java.util.concurrent ScheduledExecutorService scheduleWithFixedDelay

List of usage examples for java.util.concurrent ScheduledExecutorService scheduleWithFixedDelay

Introduction

On this page you can find example usage for java.util.concurrent ScheduledExecutorService scheduleWithFixedDelay.

Prototype

public ScheduledFuture<?> scheduleWithFixedDelay(Runnable command, long initialDelay, long delay,
        TimeUnit unit);

Document

Submits a periodic action that becomes enabled first after the given initial delay, and subsequently with the given delay between the termination of one execution and the commencement of the next.
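
Before the project examples below, here is a minimal, self-contained sketch of the call (the class name, task, and timing values are illustrative, not taken from any of the projects listed here). Each run starts one delay interval after the previous run finishes, so executions never overlap even when the task is slow.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

public class FixedDelayExample {
    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();

        // First run after 1 second, then a 2-second gap between the end of one
        // run and the start of the next.
        ScheduledFuture<?> handle = scheduler.scheduleWithFixedDelay(
                () -> System.out.println("tick " + System.nanoTime()), 1, 2, TimeUnit.SECONDS);

        Thread.sleep(10_000);  // let the task run a few times
        handle.cancel(false);  // stop further executions
        scheduler.shutdown();
    }
}

Note that if any execution of the task throws an exception, subsequent executions are suppressed, so periodic tasks typically catch and log their own errors.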

Usage

From source file:org.codice.ddf.commands.catalog.IngestCommand.java

private void submitToCatalog(ScheduledExecutorService batchScheduler, ExecutorService executorService,
        ArrayBlockingQueue<Metacard> metacardQueue, CatalogFacade catalog, long start) {

    batchScheduler.scheduleWithFixedDelay(() -> {
        int queueSize = metacardQueue.size();
        if (queueSize > 0) {

            ArrayList<Metacard> metacardBatch = new ArrayList<>(batchSize);

            if (queueSize > batchSize || doneBuildingQueue.get()) {
                metacardQueue.drainTo(metacardBatch, batchSize);
                processingThreads.incrementAndGet();
            }

            if (metacardBatch.size() > 0) {
                executorService.submit(() -> {
                    try {
                        processBatch(catalog, metacardBatch);
                    } catch (SourceUnavailableException e) {
                        if (INGEST_LOGGER.isWarnEnabled()) {
                            INGEST_LOGGER.warn("Error on process batch: {}", e);
                        }
                    }
                });
                printProgressAndFlush(start, fileCount.get(), ingestCount.get() + ignoreCount.get());
            }
        }
    }, 100, 100, TimeUnit.MILLISECONDS);
}

From source file:org.openhab.binding.network.internal.PresenceDetection.java

/**
 * Start/Restart a fixed scheduled runner to update the device's reachability state.
 *
 * @param scheduledExecutorService A scheduler to run pings periodically.
 */
public void startAutomaticRefresh(ScheduledExecutorService scheduledExecutorService) {
    if (refreshJob != null && !refreshJob.isDone()) {
        refreshJob.cancel(true);
    }
    refreshJob = scheduledExecutorService.scheduleWithFixedDelay(() -> performPresenceDetection(true), 0,
            refreshIntervalInMS, TimeUnit.MILLISECONDS);

    enableDHCPListen(useDHCPsniffing);
}

From source file:org.wso2.carbon.device.mgt.core.internal.DeviceManagementServiceComponent.java

@SuppressWarnings("unused")
protected void activate(ComponentContext componentContext) {
    try {
        if (log.isDebugEnabled()) {
            log.debug("Initializing device management core bundle");
        }
        /* Initializing Device Management Configuration */
        DeviceConfigurationManager.getInstance().initConfig();
        DeviceManagementConfig config = DeviceConfigurationManager.getInstance().getDeviceManagementConfig();

        DataSourceConfig dsConfig = config.getDeviceManagementConfigRepository().getDataSourceConfig();

        DeviceManagementDAOFactory.init(dsConfig);
        GroupManagementDAOFactory.init(dsConfig);
        NotificationManagementDAOFactory.init(dsConfig);
        OperationManagementDAOFactory.init(dsConfig);
        /*Initialize the device cache*/
        DeviceManagerUtil.initializeDeviceCache();

        /* Initialize Operation Manager */
        this.initOperationsManager();

        PushNotificationProviderRepository pushNotificationRepo = new PushNotificationProviderRepository();
        List<String> pushNotificationProviders = config.getPushNotificationConfiguration()
                .getPushNotificationProviders();
        if (pushNotificationProviders != null) {
            for (String pushNoteProvider : pushNotificationProviders) {
                pushNotificationRepo.addProvider(pushNoteProvider);
            }
        }
        DeviceManagementDataHolder.getInstance().setPushNotificationProviderRepository(pushNotificationRepo);

        /* If -Dsetup option enabled then create device management database schema */
        String setupOption = System.getProperty(DeviceManagementConstants.Common.SETUP_PROPERTY);
        if (setupOption != null) {
            if (log.isDebugEnabled()) {
                log.debug("-Dsetup is enabled. Device management repository schema initialization is about to "
                        + "begin");
            }
            this.setupDeviceManagementSchema(dsConfig);
        }

        /* Registering declarative service instances exposed by DeviceManagementServiceComponent */
        this.registerServices(componentContext);

        /* This is a workaround to initialize all Device Management Service Providers after the initialization
         * of Device Management Service component in order to avoid bundle start up order related complications */
        notifyStartupListeners();
        if (log.isDebugEnabled()) {
            log.debug("Push notification batch enabled : "
                    + config.getPushNotificationConfiguration().isSchedulerTaskEnabled());
        }
        // Start Push Notification Scheduler Task
        if (config.getPushNotificationConfiguration().isSchedulerTaskEnabled()) {
            if (config.getPushNotificationConfiguration().getSchedulerBatchSize() <= 0) {
                log.error("Push notification batch size cannot be 0 or less than 0. Setting default batch size "
                        + "to:" + DeviceManagementConstants.PushNotifications.DEFAULT_BATCH_SIZE);
                config.getPushNotificationConfiguration()
                        .setSchedulerBatchSize(DeviceManagementConstants.PushNotifications.DEFAULT_BATCH_SIZE);
            }
            if (config.getPushNotificationConfiguration().getSchedulerBatchDelayMills() <= 0) {
                log.error(
                        "Push notification batch delay cannot be 0 or less than 0. Setting default batch delay "
                                + "milliseconds to"
                                + DeviceManagementConstants.PushNotifications.DEFAULT_BATCH_DELAY_MILLS);
                config.getPushNotificationConfiguration().setSchedulerBatchDelayMills(
                        DeviceManagementConstants.PushNotifications.DEFAULT_BATCH_DELAY_MILLS);
            }
            if (config.getPushNotificationConfiguration().getSchedulerTaskInitialDelay() < 0) {
                log.error("Push notification initial delay cannot be less than 0. Setting default initial "
                        + "delay milliseconds to"
                        + DeviceManagementConstants.PushNotifications.DEFAULT_SCHEDULER_TASK_INITIAL_DELAY);
                config.getPushNotificationConfiguration().setSchedulerTaskInitialDelay(
                        DeviceManagementConstants.PushNotifications.DEFAULT_SCHEDULER_TASK_INITIAL_DELAY);
            }
            ScheduledExecutorService pushNotificationExecutor = Executors.newSingleThreadScheduledExecutor();
            pushNotificationExecutor.scheduleWithFixedDelay(new PushNotificationSchedulerTask(),
                    config.getPushNotificationConfiguration().getSchedulerTaskInitialDelay(),
                    config.getPushNotificationConfiguration().getSchedulerBatchDelayMills(),
                    TimeUnit.MILLISECONDS);
        }

        PrivacyComplianceProvider privacyComplianceProvider = new PrivacyComplianceProviderImpl();
        DeviceManagementDataHolder.getInstance().setPrivacyComplianceProvider(privacyComplianceProvider);
        componentContext.getBundleContext().registerService(PrivacyComplianceProvider.class.getName(),
                privacyComplianceProvider, null);

        if (log.isDebugEnabled()) {
            log.debug("Device management core bundle has been successfully initialized");
        }
    } catch (Throwable e) {
        log.error("Error occurred while initializing device management core bundle", e);
    }
}

From source file:org.apache.ambari.server.bootstrap.BSRunner.java

@Override
public void run() {

    if (sshHostInfo.getSshKey() == null || sshHostInfo.getSshKey().equals("")) {
        beforeBootStrap(sshHostInfo);
    }

    String hostString = createHostString(sshHostInfo.getHosts());
    String user = sshHostInfo.getUser();
    String userRunAs = sshHostInfo.getUserRunAs();
    if (user == null || user.isEmpty()) {
        user = DEFAULT_USER;
    }
    String command[] = new String[12];
    BSStat stat = BSStat.RUNNING;
    String scriptlog = "";
    try {
        createRunDir();
        if (LOG.isDebugEnabled()) {
            // FIXME needs to be removed later
            // security hole
            LOG.debug("Using ssh key=\"" + sshHostInfo.getSshKey() + "\"");
        }

        String password = sshHostInfo.getPassword();
        if (password != null && !password.isEmpty()) {
            this.passwordFile = new File(this.requestIdDir, "host_pass");
            // TODO : line separator should be changed
            // if we are going to support multi platform server-agent solution
            String lineSeparator = System.getProperty("line.separator");
            password = password + lineSeparator;
            writePasswordFile(password);
        }

        writeSshKeyFile(sshHostInfo.getSshKey());
        /* Running command:
         * script hostlist bsdir user sshkeyfile
         */
        command[0] = this.bsScript;
        command[1] = hostString;
        command[2] = this.requestIdDir.toString();
        command[3] = user;
        command[4] = this.sshKeyFile.toString();
        command[5] = this.agentSetupScript.toString();
        command[6] = this.ambariHostname;
        command[7] = this.clusterOsFamily;
        command[8] = this.projectVersion;
        command[9] = this.serverPort + "";
        command[10] = userRunAs;
        command[11] = (this.passwordFile == null) ? "null" : this.passwordFile.toString();
        LOG.info("Host= " + hostString + " bs=" + this.bsScript + " requestDir=" + requestIdDir + " user="
                + user + " keyfile=" + this.sshKeyFile + " passwordFile " + this.passwordFile + " server="
                + this.ambariHostname + " version=" + projectVersion + " serverPort=" + this.serverPort
                + " userRunAs=" + userRunAs);

        String[] env = new String[] { "AMBARI_PASSPHRASE=" + agentSetupPassword };
        if (this.verbose)
            env = new String[] { env[0], " BS_VERBOSE=\"-vvv\" " };

        if (LOG.isDebugEnabled()) {
            LOG.debug(Arrays.toString(command));
        }

        String bootStrapOutputFilePath = requestIdDir + File.separator + "bootstrap.out";
        String bootStrapErrorFilePath = requestIdDir + File.separator + "bootstrap.err";

        Process process = Runtime.getRuntime().exec(command, env);

        PrintWriter stdOutWriter = null;
        PrintWriter stdErrWriter = null;

        try {
            stdOutWriter = new PrintWriter(bootStrapOutputFilePath);
            stdErrWriter = new PrintWriter(bootStrapErrorFilePath);
            IOUtils.copy(process.getInputStream(), stdOutWriter);
            IOUtils.copy(process.getErrorStream(), stdErrWriter);
        } finally {
            if (stdOutWriter != null)
                stdOutWriter.close();

            if (stdErrWriter != null)
                stdErrWriter.close();
        }

        // Startup a scheduled executor service to look through the logs
        ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
        BSStatusCollector statusCollector = new BSStatusCollector();
        ScheduledFuture<?> handle = scheduler.scheduleWithFixedDelay(statusCollector, 0, 10, TimeUnit.SECONDS);
        LOG.info("Kicking off the scheduler for polling on logs in " + this.requestIdDir);
        try {

            LOG.info("Bootstrap output, log=" + bootStrapErrorFilePath + " " + bootStrapOutputFilePath);
            int exitCode = process.waitFor();
            String outMesg = "";
            String errMesg = "";
            try {
                outMesg = FileUtils.readFileToString(new File(bootStrapOutputFilePath));
                errMesg = FileUtils.readFileToString(new File(bootStrapErrorFilePath));
            } catch (IOException io) {
                LOG.info("Error in reading files ", io);
            }
            scriptlog = outMesg + "\n\n" + errMesg;
            LOG.info("Script log Mesg " + scriptlog);
            if (exitCode != 0) {
                stat = BSStat.ERROR;
            } else {
                stat = BSStat.SUCCESS;
            }

            scheduler.schedule(new BSStatusCollector(), 0, TimeUnit.SECONDS);
            long startTime = System.currentTimeMillis();
            while (true) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Waiting for hosts status to be updated");
                }
                boolean pendingHosts = false;
                BootStrapStatus tmpStatus = bsImpl.getStatus(requestId);
                List<BSHostStatus> hostStatusList = tmpStatus.getHostsStatus();
                if (hostStatusList != null) {
                    for (BSHostStatus status : hostStatusList) {
                        if (status.getStatus().equals("RUNNING")) {
                            pendingHosts = true;
                        }
                    }
                } else {
                    //Failed to get host status, waiting for hosts status to be updated
                    pendingHosts = true;
                }
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Whether hosts status yet to be updated, pending=" + pendingHosts);
                }
                if (!pendingHosts) {
                    break;
                }
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException e) {
                    // continue
                }
                long now = System.currentTimeMillis();
                if (now >= (startTime + 15000)) {
                    LOG.warn("Gave up waiting for hosts status to be updated");
                    break;
                }
            }
        } catch (InterruptedException e) {
            throw new IOException(e);
        } finally {
            handle.cancel(true);
            /* schedule a last update */
            scheduler.schedule(new BSStatusCollector(), 0, TimeUnit.SECONDS);
            scheduler.shutdownNow();
            try {
                scheduler.awaitTermination(10, TimeUnit.SECONDS);
            } catch (InterruptedException e) {
                LOG.info("Interruped while waiting for scheduler");
            }
            process.destroy();
        }
    } catch (IOException io) {
        LOG.info("Error executing bootstrap " + io.getMessage());
        stat = BSStat.ERROR;
    } finally {
        /* get the bstatus */
        BootStrapStatus tmpStatus = bsImpl.getStatus(requestId);
        List<BSHostStatus> hostStatusList = tmpStatus.getHostsStatus();
        if (hostStatusList != null) {
            for (BSHostStatus hostStatus : hostStatusList) {
                if ("FAILED".equals(hostStatus.getStatus())) {
                    stat = BSStat.ERROR;
                    break;
                }
            }
        } else {
            stat = BSStat.ERROR;
        }
        tmpStatus.setLog(scriptlog);
        tmpStatus.setStatus(stat);
        bsImpl.updateStatus(requestId, tmpStatus);
        bsImpl.reset();
        // Remove private ssh key after bootstrap is complete
        try {
            FileUtils.forceDelete(sshKeyFile);
        } catch (IOException io) {
            LOG.warn(io.getMessage());
        }
        if (passwordFile != null) {
            // Remove password file after bootstrap is complete
            try {
                FileUtils.forceDelete(passwordFile);
            } catch (IOException io) {
                LOG.warn(io.getMessage());
            }
        }
        finished();
    }
}

From source file:org.springframework.scheduling.concurrent.ScheduledExecutorFactoryBean.java

/**
 * Register the specified {@link ScheduledExecutorTask ScheduledExecutorTasks}
 * on the given {@link ScheduledExecutorService}.
 * @param tasks the specified ScheduledExecutorTasks (never empty)
 * @param executor the ScheduledExecutorService to register the tasks on.
 */
protected void registerTasks(ScheduledExecutorTask[] tasks, ScheduledExecutorService executor) {
    for (int i = 0; i < tasks.length; i++) {
        ScheduledExecutorTask task = tasks[i];
        Runnable runnable = getRunnableToSchedule(task);
        if (task.isOneTimeTask()) {
            executor.schedule(runnable, task.getDelay(), task.getTimeUnit());
        } else {
            if (task.isFixedRate()) {
                executor.scheduleAtFixedRate(runnable, task.getDelay(), task.getPeriod(), task.getTimeUnit());
            } else {
                executor.scheduleWithFixedDelay(runnable, task.getDelay(), task.getPeriod(),
                        task.getTimeUnit());
            }
        }
    }
}
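
For reference, a hypothetical programmatic use of the factory bean above might look like the sketch below (assuming Spring's ScheduledExecutorTask(Runnable, delay, period, fixedRate) constructor, which defaults to millisecond units). A task with a period and fixedRate=false is routed to scheduleWithFixedDelay by registerTasks.

import org.springframework.scheduling.concurrent.ScheduledExecutorFactoryBean;
import org.springframework.scheduling.concurrent.ScheduledExecutorTask;

public class FactoryBeanFixedDelayExample {
    public static void main(String[] args) throws Exception {
        // Initial delay 1s, then a 5s gap between executions; fixedRate=false
        // means registerTasks dispatches this task to scheduleWithFixedDelay.
        ScheduledExecutorTask task = new ScheduledExecutorTask(
                () -> System.out.println("polling..."), 1000, 5000, false);

        ScheduledExecutorFactoryBean factory = new ScheduledExecutorFactoryBean();
        factory.setScheduledExecutorTasks(task);
        factory.afterPropertiesSet(); // builds the executor and registers the task

        Thread.sleep(12_000);
        factory.destroy(); // shuts the executor down
    }
}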

From source file:org.wso2.carbon.core.clustering.hazelcast.HazelcastClusteringAgent.java

public void init() throws ClusteringFault {
    MemberUtils.init(parameters, configurationContext);

    primaryHazelcastConfig = new Config();
    setHazelcastProperties();

    Parameter managementCenterURL = getParameter(HazelcastConstants.MGT_CENTER_URL);
    if (managementCenterURL != null) {
        primaryHazelcastConfig.getManagementCenterConfig().setEnabled(true)
                .setUrl((String) managementCenterURL.getValue());
    }

    Parameter licenseKey = getParameter(HazelcastConstants.LICENSE_KEY);
    if (licenseKey != null) {
        primaryHazelcastConfig.setLicenseKey((String) licenseKey.getValue());
    }

    primaryDomain = getClusterDomain();
    primaryHazelcastConfig.setInstanceName(primaryDomain + ".instance");
    log.info("Cluster domain: " + primaryDomain);
    GroupConfig groupConfig = primaryHazelcastConfig.getGroupConfig();
    groupConfig.setName(primaryDomain);
    Parameter memberPassword = getParameter(HazelcastConstants.GROUP_PASSWORD);
    if (memberPassword != null) {
        groupConfig.setPassword((String) memberPassword.getValue());
    }

    NetworkConfig nwConfig = primaryHazelcastConfig.getNetworkConfig();
    Parameter localMemberHostParam = getParameter(HazelcastConstants.LOCAL_MEMBER_HOST);
    String localMemberHost = "";
    if (localMemberHostParam != null) {
        localMemberHost = ((String) localMemberHostParam.getValue()).trim();
        if ("127.0.0.1".equals(localMemberHost) || "localhost".equals(localMemberHost)) {
            log.warn("localMemberHost is configured to use the loopback address. "
                    + "Hazelcast Clustering needs ip addresses for localMemberHost and well-known members.");
        }
    } else {
        try {
            localMemberHost = Utils.getIpAddress();
        } catch (SocketException e) {
            log.error("Could not set local member host", e);
        }
    }
    nwConfig.setPublicAddress(localMemberHost);
    int localMemberPort = 4000;
    Parameter localMemberPortParam = getParameter(HazelcastConstants.LOCAL_MEMBER_PORT);
    if (localMemberPortParam != null) {
        localMemberPort = Integer.parseInt(((String) localMemberPortParam.getValue()).trim());
    }
    nwConfig.setPort(localMemberPort);

    configureMembershipScheme(nwConfig);
    MapConfig mapConfig = new MapConfig("carbon-map-config");
    mapConfig.setEvictionPolicy(MapConfig.DEFAULT_EVICTION_POLICY);
    if (licenseKey != null) {
        mapConfig.setInMemoryFormat(InMemoryFormat.BINARY);
    }
    primaryHazelcastConfig.addMapConfig(mapConfig);
    loadCustomHazelcastSerializers();

    if (clusterManagementMode) {
        for (Map.Entry<String, Map<String, GroupManagementAgent>> entry : groupManagementAgents.entrySet()) {
            for (GroupManagementAgent agent : entry.getValue().values()) {
                if (agent instanceof HazelcastGroupManagementAgent) {
                    ((HazelcastGroupManagementAgent) agent).init(primaryHazelcastConfig, configurationContext);
                }
            }
        }
    }
    long start = System.currentTimeMillis();
    primaryHazelcastInstance = Hazelcast.newHazelcastInstance(primaryHazelcastConfig);
    log.info("Hazelcast initialized in " + (System.currentTimeMillis() - start) + "ms");
    HazelcastCarbonClusterImpl hazelcastCarbonCluster = new HazelcastCarbonClusterImpl(
            primaryHazelcastInstance);

    membershipScheme.setPrimaryHazelcastInstance(primaryHazelcastInstance);
    membershipScheme.setCarbonCluster(hazelcastCarbonCluster);

    clusteringMessageTopic = primaryHazelcastInstance.getTopic(HazelcastConstants.CLUSTERING_MESSAGE_TOPIC);
    clusteringMessageTopic.addMessageListener(
            new HazelcastClusterMessageListener(configurationContext, recdMsgsBuffer, sentMsgsBuffer));
    groupManagementTopic = primaryHazelcastInstance.getTopic(HazelcastConstants.GROUP_MGT_CMD_TOPIC);
    groupManagementTopic.addMessageListener(new GroupManagementCommandListener(configurationContext));
    ITopic<ControlCommand> controlCommandTopic = primaryHazelcastInstance
            .getTopic(HazelcastConstants.CONTROL_COMMAND_TOPIC);
    controlCommandTopic.addMessageListener(new HazelcastControlCommandListener(configurationContext));

    Member localMember = primaryHazelcastInstance.getCluster().getLocalMember();
    membershipScheme.setLocalMember(localMember);
    membershipScheme.joinGroup();
    localMember = primaryHazelcastInstance.getCluster().getLocalMember();
    localMember.getInetSocketAddress().getPort();
    final org.apache.axis2.clustering.Member carbonLocalMember = MemberUtils.getLocalMember(primaryDomain,
            localMember.getInetSocketAddress().getAddress().getHostAddress(),
            localMember.getInetSocketAddress().getPort());
    log.info("Local member: [" + localMember.getUuid() + "] - " + carbonLocalMember);

    //Create a Queue for receiving messages from others
    final ITopic<ClusterMessage> replayedMsgs = primaryHazelcastInstance
            .getTopic(HazelcastConstants.REPLAY_MESSAGE_QUEUE + localMember.getUuid());
    replayedMsgs.addMessageListener(new MessageListener<ClusterMessage>() {

        @Override
        public void onMessage(Message<ClusterMessage> clusterMessage) {
            ClusterMessage msg = clusterMessage.getMessageObject();
            // check UUID to eliminate duplicates
            if (!recdMsgsBuffer.containsKey(msg.getUuid())) {
                log.info("Received replayed message: " + msg.getUuid());
                msg.execute();
                recdMsgsBuffer.put(msg.getUuid(), System.currentTimeMillis());
            }
        }
    });

    if (carbonLocalMember.getProperties().get("subDomain") == null) {
        carbonLocalMember.getProperties().put("subDomain", "__$default"); // Set the default subDomain
    }
    MemberUtils.getMembersMap(primaryHazelcastInstance, primaryDomain).put(localMember.getUuid(),
            carbonLocalMember);

    // To receive membership events required for the leader election algorithm.
    primaryHazelcastInstance.getCluster().addMembershipListener(new CoordinatorElectionMembershipListener());

    //-- Cluster coordinator election algorithm implementation starts here.
    // Hazelcast Community confirms that the list of members is consistent across a given cluster.  Also the first
    // member of the member list you get from primaryHazelcastInstance.getCluster().getMembers() is consistent
    // across the cluster, and the first member is usually the oldest member. Therefore we can safely assume the
    // first member is the coordinator node.

    // Now this distributed lock is used to correctly identify the coordinator node during member startup. The
    // node which acquires the lock checks whether it is the oldest member in the cluster. If it is the oldest
    // member, it elects itself as the coordinator node and then releases the lock. If it is not the oldest
    // member, it simply releases the lock. This distributed lock is used to avoid any race conditions.
    ILock lock = primaryHazelcastInstance.getLock(HazelcastConstants.CLUSTER_COORDINATOR_LOCK);

    try {
        log.debug("Trying to get the CLUSTER_COORDINATOR_LOCK lock.");

        lock.lock();
        log.debug("Acquired the CLUSTER_COORDINATOR_LOCK lock.");

        Member oldestMember = primaryHazelcastInstance.getCluster().getMembers().iterator().next();
        if (oldestMember.localMember() && !isCoordinator) {
            electCoordinatorNode();
        }
    } finally {
        lock.unlock();
        log.debug("Released the CLUSTER_COORDINATOR_LOCK lock.");
    }
    //-- Coordinator election algorithm ends here.

    BundleContext bundleContext = CarbonCoreDataHolder.getInstance().getBundleContext();
    bundleContext.registerService(DistributedMapProvider.class,
            new HazelcastDistributedMapProvider(primaryHazelcastInstance), null);
    bundleContext.registerService(HazelcastInstance.class, primaryHazelcastInstance, null);
    bundleContext.registerService(CarbonCluster.class, hazelcastCarbonCluster, null);
    ScheduledExecutorService msgCleanupScheduler = Executors.newScheduledThreadPool(1);
    msgCleanupScheduler.scheduleWithFixedDelay(new ClusterMessageCleanupTask(), 2, 2, TimeUnit.MINUTES);

    log.info("Cluster initialization completed");
}

From source file:org.apache.james.fetchmail.FetchScheduler.java

@PostConstruct
public void init() throws Exception {
    enabled = conf.getBoolean("[@enabled]", false);
    if (enabled) {
        int numThreads = conf.getInt("threads", 5);
        String jmxName = conf.getString("jmxName", "fetchmail");
        String jmxPath = "org.apache.james:type=component,name=" + jmxName + ",sub-type=threadpool";

        /*
        The scheduler service that is used to trigger fetch tasks.
        */
        ScheduledExecutorService scheduler = new JMXEnabledScheduledThreadPoolExecutor(numThreads, jmxPath,
                "scheduler");
        MailQueue queue = queueFactory.getQueue(MailQueueFactory.SPOOL);

        List<HierarchicalConfiguration> fetchConfs = conf.configurationsAt("fetch");
        for (HierarchicalConfiguration fetchConf : fetchConfs) {
            // read configuration
            Long interval = fetchConf.getLong("interval");

            FetchMail fetcher = new FetchMail();

            fetcher.setLog(logger);
            fetcher.setDNSService(dns);
            fetcher.setUsersRepository(urepos);
            fetcher.setMailQueue(queue);
            fetcher.setDomainList(domainList);

            fetcher.configure(fetchConf);

            // initialize scheduling
            schedulers.add(scheduler.scheduleWithFixedDelay(fetcher, 0, interval, TimeUnit.MILLISECONDS));
        }

        if (logger.isInfoEnabled())
            logger.info("FetchMail Started");
    } else {
        if (logger.isInfoEnabled())
            logger.info("FetchMail Disabled");
    }
}

From source file:org.apache.nifi.controller.StandardFlowService.java

@Override
public void start() throws LifeCycleStartException {
    writeLock.lock();
    try {
        if (isRunning()) {
            return;
        }

        running.set(true);

        final ScheduledExecutorService newExecutor = new FlowEngine(2, "Flow Service Tasks");
        newExecutor.scheduleWithFixedDelay(new SaveReportingTask(), 0L, 500L, TimeUnit.MILLISECONDS);
        this.executor.set(newExecutor);

        if (configuredForClustering) {
            senderListener.start();
        }

    } catch (final IOException ioe) {
        try {
            stop(/* force */true);
        } catch (final Exception e) {
        }

        throw new LifeCycleStartException("Failed to start Flow Service due to: " + ioe, ioe);
    } finally {
        writeLock.unlock();
    }
}

From source file:com.pinterest.pinlater.backends.redis.PinLaterRedisBackend.java

/**
 * Creates an instance of the PinLaterRedisBackend.
 *
 * @param configuration configuration parameters for the backend.
 * @param redisConfigStream stream encapsulating the Redis json config.
 * @param serverHostName hostname of the PinLater server.
 * @param serverStartTimeMillis start time of the PinLater server.
 */
public PinLaterRedisBackend(PropertiesConfiguration configuration, InputStream redisConfigStream,
        String serverHostName, long serverStartTimeMillis) throws Exception {
    super(configuration, "Redis", serverHostName, serverStartTimeMillis);
    this.shardMap = RedisBackendUtils.buildShardMap(redisConfigStream, configuration);
    this.healthChecker = new HealthChecker("PinLaterRedis");
    for (RedisPools redisPools : shardMap.values()) {
        this.healthChecker.addServer(redisPools.getHost(), redisPools.getPort(),
                new RedisHeartBeater(new JedisClientHelper(), redisPools.getMonitorRedisPool()),
                configuration.getInt("REDIS_HEALTH_CHECK_CONSECUTIVE_FAILURES", 6),
                configuration.getInt("REDIS_HEALTH_CHECK_CONSECUTIVE_SUCCESSES", 6),
                configuration.getInt("REDIS_HEALTH_CHECK_PING_INTERVAL_SECONDS", 5), true); // is live initially
    }

    // Start the JobQueueMonitor scheduled task.
    final int delaySeconds = configuration.getInt("BACKEND_MONITOR_THREAD_DELAY_SECONDS");
    ScheduledExecutorService service = Executors.newSingleThreadScheduledExecutor(
            new ThreadFactoryBuilder().setDaemon(true).setNameFormat("RedisJobQueueMonitor-%d").build());
    service.scheduleWithFixedDelay(new RedisQueueMonitor(shardMap, configuration, healthChecker),
            // Randomize initial delay to prevent all servers from running GC at the same time.
            delaySeconds + RANDOM.nextInt(delaySeconds), delaySeconds, TimeUnit.SECONDS);

    // Load queue names into memory. Silently catch exceptions to avoid failure in initialization.
    // If queue names are not loaded at this time, they will be retried upon requests.
    try {
        reloadQueueNames();
    } catch (Exception e) {
        // Retry the ack.
        Stats.incr("init-queuenames-failure");
        LOG.error("Failed to load queue names upon initialization.", e);
    }

    // Call Base class's initialization function to initialize the futurePool and dequeue
    // semaphoreMap.
    initialize();
}

From source file:org.openhab.binding.homematic.internal.communicator.AbstractHomematicGateway.java

/**
 * {@inheritDoc}
 */
@Override
public void startWatchdogs() {
    ScheduledExecutorService scheduler = ThreadPoolManager.getScheduledPool(GATEWAY_POOL_NAME);

    if (config.getReconnectInterval() == 0) {
        logger.debug("Starting event tracker for gateway with id '{}'", id);
        eventTrackerThread = scheduler.scheduleWithFixedDelay(new EventTrackerThread(), 1, 1, TimeUnit.MINUTES);
    } else {
        // schedule fixed delay restart
        logger.debug("Starting reconnect tracker for gateway with id '{}'", id);
        reconnectThread = scheduler.scheduleWithFixedDelay(new ReconnectThread(), config.getReconnectInterval(),
                config.getReconnectInterval(), TimeUnit.SECONDS);
    }
    logger.debug("Starting connection tracker for gateway with id '{}'", id);
    connectionTrackerThread = scheduler.scheduleWithFixedDelay(new ConnectionTrackerThread(), 30,
            CONNECTION_TRACKER_INTERVAL_SECONDS, TimeUnit.SECONDS);
}