List of usage examples for java.util.concurrent.Executors.newSingleThreadScheduledExecutor
public static ScheduledExecutorService newSingleThreadScheduledExecutor(ThreadFactory threadFactory)
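Before the project examples below, here is a minimal, self-contained sketch of how newSingleThreadScheduledExecutor(ThreadFactory) is typically used. The class name, the "example-scheduler" thread name, and the delays are illustrative only and are not taken from any of the projects listed.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;

public class SingleThreadSchedulerExample {
    public static void main(String[] args) throws InterruptedException {
        // Custom factory so the scheduler's single worker thread has a recognizable name
        // and, being a daemon, does not keep the JVM alive on its own.
        ThreadFactory threadFactory = r -> {
            Thread t = new Thread(r, "example-scheduler");
            t.setDaemon(true);
            return t;
        };

        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor(threadFactory);

        // Run a task every 5 seconds, starting after an initial 1-second delay.
        scheduler.scheduleWithFixedDelay(
                () -> System.out.println("periodic task on " + Thread.currentThread().getName()),
                1, 5, TimeUnit.SECONDS);

        // Let the demo run briefly, then shut the scheduler down.
        Thread.sleep(12_000);
        scheduler.shutdown();
    }
}

As the examples below show, the ThreadFactory overload is mostly used to give the single scheduler thread a descriptive name and, often, daemon status.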
From source file:org.apache.nifi.registry.provider.flow.git.GitFlowMetaData.java
void startPushThread() {
    // If successfully loaded, start pushing thread if necessary.
    if (isEmpty(remoteToPush)) {
        return;
    }

    final ThreadFactory threadFactory = new BasicThreadFactory.Builder().daemon(true)
            .namingPattern(getClass().getSimpleName() + " Push thread").build();

    // Use scheduled fixed delay to control the minimum interval between push activities.
    // The necessity of executing push is controlled by offering messages to the pushQueue.
    // If multiple commits are made within this time window, those are pushed by a single push execution.
    final ScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor(threadFactory);
    executorService.scheduleWithFixedDelay(() -> {
        final Long offeredTimestamp;
        try {
            offeredTimestamp = pushQueue.take();
        } catch (InterruptedException e) {
            logger.warn("Waiting for push request has been interrupted due to {}", e.getMessage(), e);
            return;
        }

        logger.debug("Took a push request sent at {} to {}...", offeredTimestamp, remoteToPush);

        final PushCommand pushCommand = new Git(gitRepo).push().setRemote(remoteToPush);
        if (credentialsProvider != null) {
            pushCommand.setCredentialsProvider(credentialsProvider);
        }

        try {
            final Iterable<PushResult> pushResults = pushCommand.call();
            for (PushResult pushResult : pushResults) {
                logger.debug(pushResult.getMessages());
            }
        } catch (GitAPIException e) {
            logger.error(format("Failed to push commits to %s due to %s", remoteToPush, e), e);
        }
    }, 10, 10, TimeUnit.SECONDS);
}
From source file:org.openecomp.sdc.be.dao.titan.TitanGraphClient.java
/**
 * This method will be invoked ONLY on init time in case Titan storage is down.
 */
private void startReconnectTask() {
    this.reconnectTask = new ReconnectTask();

    // Initialize a single threaded scheduler
    this.reconnectScheduler = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            return new Thread(r, "Titan-Reconnect-Task");
        }
    });

    logger.info("Scheduling reconnect task {} with interval of {} seconds", reconnectTask, reconnectInterval);
    reconnectFuture = this.reconnectScheduler.scheduleAtFixedRate(this.reconnectTask, 0, this.reconnectInterval,
            TimeUnit.SECONDS);
}
From source file:com.clustercontrol.agent.custom.CommandCollector.java
@Override
public void start() {
    // determine startup delay (using monitorId for random seed)
    int delay = new Random(config.getMonitorId().hashCode()).nextInt(config.getInterval());

    // determine startup time
    // example)
    //   delay        : 15 [sec]
    //   interval     : 300 [sec]
    //   now          : 2000-1-1 00:00:10
    //   best startup : 2000-1-1 00:00:15
    long now = HinemosTime.currentTimeMillis();
    long startup = config.getInterval() * (now / config.getInterval()) + delay;
    startup = startup > now ? startup : startup + config.getInterval();

    log.info("scheduling command collector. (" + this + ", startup = "
            + String.format("%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS", new Date(startup))
            + ", interval = " + config.getInterval() + " [msec])");

    // initialize scheduler thread
    _scheduler = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() {
        private volatile int _count = 0;

        @Override
        public Thread newThread(Runnable r) {
            return new Thread(r, "CommandCollectorScheduler-" + _count++);
        }
    });

    // start scheduler
    // when the agent starts up, this is called twice; the first execution is delayed by one interval to avoid double execution.
    _scheduler.scheduleWithFixedDelay(this, startup - now, config.getInterval(), TimeUnit.MILLISECONDS);
}
From source file:org.codice.ddf.opensearch.source.OpenSearchSource.java
private void updateScheduler() {
    LOGGER.debug("Setting availability poll task for {} minute(s) on Source {}", pollInterval, getId());

    isAvailable = false;

    if (scheduler != null) {
        LOGGER.debug("Cancelling availability poll task on Source {}", getId());
        scheduler.shutdownNow();
    }

    scheduler = Executors.newSingleThreadScheduledExecutor(
            StandardThreadFactoryBuilder.newThreadFactory("openSearchSourceThread"));
    scheduler.scheduleWithFixedDelay(new Runnable() {

        private boolean availabilityCheck() {
            LOGGER.debug("Checking availability for source {} ", getId());
            try {
                final WebClient client = factory.getWebClient();
                final Response response = client.head();
                return response != null && !(response.getStatus() >= 404 || response.getStatus() == 402);
            } catch (Exception e) {
                LOGGER.debug("Web Client was unable to connect to endpoint.", e);
                return false;
            }
        }

        @Override
        public void run() {
            isAvailable = availabilityCheck();
        }
    }, 1, pollInterval.longValue() * 60L, TimeUnit.SECONDS);
}
From source file:org.apache.pulsar.broker.PulsarService.java
public PulsarService(ServiceConfiguration config, Optional<WorkerService> functionWorkerService) {
    // Validate correctness of configuration
    PulsarConfigurationLoader.isComplete(config);

    state = State.Init;
    this.bindAddress = ServiceConfigurationUtils.getDefaultOrConfiguredAddress(config.getBindAddress());
    this.advertisedAddress = advertisedAddress(config);
    this.webServiceAddress = webAddress(config);
    this.webServiceAddressTls = webAddressTls(config);
    this.brokerServiceUrl = brokerUrl(config);
    this.brokerServiceUrlTls = brokerUrlTls(config);
    this.brokerVersion = PulsarBrokerVersionStringUtils.getNormalizedVersionString();
    this.config = config;
    this.shutdownService = new MessagingServiceShutdownHook(this);
    this.loadManagerExecutor = Executors
            .newSingleThreadScheduledExecutor(new DefaultThreadFactory("pulsar-load-manager"));
    this.functionWorkerService = functionWorkerService;
}
From source file:com.numenta.core.service.DataService.java
@Override
public void onCreate() {
    super.onCreate();
    Log.i(TAG, "Service started");

    AlarmReceiver alarm = new AlarmReceiver();
    alarm.startAlarm(getApplicationContext());

    // Initialize thread pools
    _timer = Executors.newSingleThreadScheduledExecutor(TIMER_THREAD_FACTORY);
    _ioThreadPool = Executors.newFixedThreadPool(IOTHREAD_POOL_SIZE, IO_THREAD_FACTORY);
    _workerPool = Executors.newCachedThreadPool(WORKER_THREAD_FACTORY); // Executors.newFixedThreadPool(WORKER_POOL_SIZE);

    // Optimize HTTP connection by keeping the HTTP connections alive and reusing them
    System.getProperties().setProperty("sun.net.http.errorstream.enableBuffering", "true");
    System.getProperties().setProperty("http.maxConnections", String.valueOf(HTTP_CONNECTION_POOL_SIZE));

    startScheduledTasks();

    // Start Metric Data Sync Service
    _dataSyncService = HTMApplication.getInstance().createDataSyncService(this);
    _dataSyncService.start();

    // TODO: Start Notification Service, taking into account prefs
    _notificationService = HTMApplication.getInstance().createNotificationService(this);
    _notificationService.start();
}
From source file:com.numenta.core.service.GrokService.java
@Override
public void onCreate() {
    super.onCreate();
    Log.i(TAG, "Service started");

    AlarmReceiver alarm = new AlarmReceiver();
    alarm.startAlarm(getApplicationContext());

    // Initialize thread pools
    _timer = Executors.newSingleThreadScheduledExecutor(TIMER_THREAD_FACTORY);
    _ioThreadPool = Executors.newFixedThreadPool(IOTHREAD_POOL_SIZE, IO_THREAD_FACTORY);
    _workerPool = Executors.newCachedThreadPool(WORKER_THREAD_FACTORY); // Executors.newFixedThreadPool(WORKER_POOL_SIZE);

    // Optimize HTTP connection by keeping the HTTP connections alive and reusing them
    System.getProperties().setProperty("sun.net.http.errorstream.enableBuffering", "true");
    System.getProperties().setProperty("http.maxConnections", String.valueOf(HTTP_CONNECTION_POOL_SIZE));

    startScheduledTasks();

    // Start Metric Data Sync Service
    _dataSyncService = GrokApplication.getInstance().createDataSyncService(this);
    _dataSyncService.start();

    // TODO: Start Notification Service, taking into account prefs
    _notificationService = GrokApplication.getInstance().createNotificationService(this);
    _notificationService.start();
}
From source file:ws.wamp.jawampa.WampRouter.java
WampRouter(Map<String, RealmConfig> realms) {
    // Populate the realms from the configuration
    this.realms = new HashMap<String, Realm>();
    for (Map.Entry<String, RealmConfig> e : realms.entrySet()) {
        Realm info = new Realm(e.getValue());
        this.realms.put(e.getKey(), info);
    }

    // Create an eventloop and the RX scheduler on top of it
    this.eventLoop = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            Thread t = new Thread(r, "WampRouterEventLoop");
            t.setDaemon(true);
            return t;
        }
    });
    this.scheduler = Schedulers.from(eventLoop);

    idleChannels = new HashSet<IConnectionController>();
}
From source file:gobblin.yarn.GobblinYarnAppLauncher.java
public GobblinYarnAppLauncher(Config config, YarnConfiguration yarnConfiguration) throws IOException {
    this.config = config;

    this.applicationName = config.getString(GobblinYarnConfigurationKeys.APPLICATION_NAME_KEY);
    this.appQueueName = config.getString(GobblinYarnConfigurationKeys.APP_QUEUE_KEY);

    String zkConnectionString = config.getString(GobblinClusterConfigurationKeys.ZK_CONNECTION_STRING_KEY);
    LOGGER.info("Using ZooKeeper connection string: " + zkConnectionString);

    this.helixManager = HelixManagerFactory.getZKHelixManager(
            config.getString(GobblinClusterConfigurationKeys.HELIX_CLUSTER_NAME_KEY),
            GobblinClusterUtils.getHostname(), InstanceType.SPECTATOR, zkConnectionString);

    this.yarnConfiguration = yarnConfiguration;
    this.yarnConfiguration.set("fs.automatic.close", "false");
    this.yarnClient = YarnClient.createYarnClient();
    this.yarnClient.init(this.yarnConfiguration);

    this.fs = config.hasPath(ConfigurationKeys.FS_URI_KEY)
            ? FileSystem.get(URI.create(config.getString(ConfigurationKeys.FS_URI_KEY)), this.yarnConfiguration)
            : FileSystem.get(this.yarnConfiguration);
    this.closer.register(this.fs);

    this.applicationStatusMonitor = Executors.newSingleThreadScheduledExecutor(
            ExecutorsUtils.newThreadFactory(Optional.of(LOGGER), Optional.of("GobblinYarnAppStatusMonitor")));
    this.appReportIntervalMinutes = config.getLong(GobblinYarnConfigurationKeys.APP_REPORT_INTERVAL_MINUTES_KEY);

    this.appMasterJvmArgs = config.hasPath(GobblinYarnConfigurationKeys.APP_MASTER_JVM_ARGS_KEY)
            ? Optional.of(config.getString(GobblinYarnConfigurationKeys.APP_MASTER_JVM_ARGS_KEY))
            : Optional.<String>absent();

    this.sinkLogRootDir = new Path(config.getString(GobblinYarnConfigurationKeys.LOGS_SINK_ROOT_DIR_KEY));

    this.maxGetApplicationReportFailures = config
            .getInt(GobblinYarnConfigurationKeys.MAX_GET_APP_REPORT_FAILURES_KEY);

    this.emailNotificationOnShutdown = config
            .getBoolean(GobblinYarnConfigurationKeys.EMAIL_NOTIFICATION_ON_SHUTDOWN_KEY);
}
From source file:org.apache.pulsar.broker.loadbalance.impl.SimpleLoadManagerImpl.java
public SimpleLoadManagerImpl() {
    scheduler = Executors
            .newSingleThreadScheduledExecutor(new DefaultThreadFactory("pulsar-simple-load-manager"));
    this.sortedRankings.set(new TreeMap<>());
    this.currentLoadReports = new HashMap<>();
    this.resourceUnitRankings = new HashMap<>();
    this.loadBalancingMetrics.set(Lists.newArrayList());
    this.realtimeResourceQuotas.set(new HashMap<>());
    this.realtimeAvgResourceQuota = new ResourceQuota();
    placementStrategy = new WRRPlacementStrategy();
    bundleGainsCache = new HashSet<>();
    bundleLossesCache = new HashSet<>();
    brokerCandidateCache = new HashSet<>();
    availableBrokersCache = new HashSet<>();
    brokerToNamespaceToBundleRange = new HashMap<>();
}