List of usage examples for java.util.concurrent.Executors.newSingleThreadScheduledExecutor(ThreadFactory)
public static ScheduledExecutorService newSingleThreadScheduledExecutor(ThreadFactory threadFactory)
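Before the project-specific examples, here is a minimal, self-contained sketch of the typical usage pattern: a ThreadFactory that names the single worker thread, followed by periodic scheduling on the resulting ScheduledExecutorService. It is not taken from any of the source files below; the class name, thread name, and timings are illustrative only.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;

public class SingleThreadSchedulerExample {
    public static void main(String[] args) throws InterruptedException {
        // A ThreadFactory that names the single worker thread and marks it as a daemon
        // so it does not keep the JVM alive on its own.
        ThreadFactory factory = runnable -> {
            Thread thread = new Thread(runnable, "example-scheduler");
            thread.setDaemon(true);
            return thread;
        };

        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor(factory);

        // Run a task once per second after an initial one-second delay.
        scheduler.scheduleAtFixedRate(
                () -> System.out.println("tick on " + Thread.currentThread().getName()),
                1, 1, TimeUnit.SECONDS);

        TimeUnit.SECONDS.sleep(5);
        scheduler.shutdown();
    }
}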
From source file:com.ctriposs.r2.transport.http.client.HttpClientFactory.java
/**
 * Construct a new instance with a specified callback executor.
 *
 * @param callbackExecutor an optional executor to invoke user callbacks that otherwise
 *                         will be invoked by scheduler executor.
 * @param shutdownCallbackExecutor if true, the callback executor will be shut down when
 *                                 this factory is shut down
 */
public HttpClientFactory(ExecutorService callbackExecutor, boolean shutdownCallbackExecutor) {
    this(FilterChains.empty(),
            new NioClientSocketChannelFactory(
                    Executors.newCachedThreadPool(new NamedThreadFactory("R2 Netty IO Boss")),
                    Executors.newCachedThreadPool(new NamedThreadFactory("R2 Netty IO Worker"))),
            true,
            Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory("R2 Netty Scheduler")),
            true,
            callbackExecutor,
            shutdownCallbackExecutor);
}
From source file:org.onosproject.store.consistent.impl.DistributedLeadershipManager.java
@Activate
public void activate() {
    leaderMap = storageService.<String, NodeId>consistentMapBuilder()
            .withName("onos-topic-leaders")
            .withSerializer(SERIALIZER)
            .withPartitionsDisabled()
            .build();
    candidateMap = storageService.<String, List<NodeId>>consistentMapBuilder()
            .withName("onos-topic-candidates")
            .withSerializer(SERIALIZER)
            .withPartitionsDisabled()
            .build();

    leaderMap.addListener(event -> {
        log.debug("Received {}", event);
        LeadershipEvent.Type leadershipEventType = null;
        if (event.type() == MapEvent.Type.INSERT || event.type() == MapEvent.Type.UPDATE) {
            leadershipEventType = LeadershipEvent.Type.LEADER_ELECTED;
        } else if (event.type() == MapEvent.Type.REMOVE) {
            leadershipEventType = LeadershipEvent.Type.LEADER_BOOTED;
        }
        onLeadershipEvent(new LeadershipEvent(leadershipEventType,
                new Leadership(event.key(),
                        event.value().value(),
                        event.value().version(),
                        event.value().creationTime())));
    });

    candidateMap.addListener(event -> {
        log.debug("Received {}", event);
        if (event.type() != MapEvent.Type.INSERT && event.type() != MapEvent.Type.UPDATE) {
            log.error("Entries must not be removed from candidate map");
            return;
        }
        onLeadershipEvent(new LeadershipEvent(LeadershipEvent.Type.CANDIDATES_CHANGED,
                new Leadership(event.key(),
                        event.value().value(),
                        event.value().version(),
                        event.value().creationTime())));
    });

    localNodeId = clusterService.getLocalNode().id();

    electionRunner = Executors.newSingleThreadScheduledExecutor(
            groupedThreads("onos/store/leadership", "election-runner"));
    lockExecutor = Executors.newScheduledThreadPool(4,
            groupedThreads("onos/store/leadership", "election-thread-%d"));
    staleLeadershipPurgeExecutor = Executors.newSingleThreadScheduledExecutor(
            groupedThreads("onos/store/leadership", "stale-leadership-evictor"));
    leadershipRefresher = Executors.newSingleThreadScheduledExecutor(
            groupedThreads("onos/store/leadership", "refresh-thread"));

    clusterService.addListener(clusterEventListener);

    electionRunner.scheduleWithFixedDelay(this::electLeaders, 0,
            DELAY_BETWEEN_LEADER_LOCK_ATTEMPTS_SEC, TimeUnit.SECONDS);
    leadershipRefresher.scheduleWithFixedDelay(this::refreshLeaderBoard, 0,
            LEADERSHIP_REFRESH_INTERVAL_SEC, TimeUnit.SECONDS);

    listenerRegistry = new ListenerRegistry<>();
    eventDispatcher.addSink(LeadershipEvent.class, listenerRegistry);

    log.info("Started");
}
From source file:org.openecomp.sdc.be.dao.titan.TitanGraphClient.java
public TitanGraphClient() {
    super();

    // Initialize a single threaded scheduler for health-check
    this.healthCheckScheduler = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            return new Thread(r, "Titan-Health-Check-Task");
        }
    });

    healthCheckReadTimeout = ConfigurationManager.getConfigurationManager().getConfiguration()
            .getTitanHealthCheckReadTimeout(2);
    reconnectInterval = ConfigurationManager.getConfigurationManager().getConfiguration()
            .getTitanReconnectIntervalInSeconds(3);

    logger.info("** TitanGraphClient created");
}
From source file:org.springframework.cloud.stream.micrometer.DefaultDestinationPublishingMeterRegistry.java
private void start(ThreadFactory threadFactory) {
    if (publisher != null) {
        stop();
    }
    publisher = Executors.newSingleThreadScheduledExecutor(threadFactory).scheduleAtFixedRate(this::publish,
            metricsPublisherConfig.step().toMillis(), metricsPublisherConfig.step().toMillis(),
            TimeUnit.MILLISECONDS);
}
From source file:com.linkedin.r2.transport.http.client.HttpClientFactory.java
/**
 * Construct a new instance with a specified callback executor.
 *
 * @param callbackExecutor an optional executor to invoke user callbacks that otherwise
 *                         will be invoked by scheduler executor.
 * @param shutdownCallbackExecutor if true, the callback executor will be shut down when
 *                                 this factory is shut down
 */
public HttpClientFactory(ExecutorService callbackExecutor, boolean shutdownCallbackExecutor) {
    this(FilterChains.empty(),
            new NioEventLoopGroup(0 /* use default settings */, new NamedThreadFactory("R2 Nio Event Loop")),
            true,
            Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory("R2 Netty Scheduler")),
            true,
            callbackExecutor,
            shutdownCallbackExecutor);
}
From source file:com.pinterest.pinlater.backends.redis.PinLaterRedisBackend.java
/**
 * Creates an instance of the PinLaterRedisBackend.
 *
 * @param configuration configuration parameters for the backend.
 * @param redisConfigStream stream encapsulating the Redis json config.
 * @param serverHostName hostname of the PinLater server.
 * @param serverStartTimeMillis start time of the PinLater server.
 */
public PinLaterRedisBackend(PropertiesConfiguration configuration, InputStream redisConfigStream,
        String serverHostName, long serverStartTimeMillis) throws Exception {
    super(configuration, "Redis", serverHostName, serverStartTimeMillis);
    this.shardMap = RedisBackendUtils.buildShardMap(redisConfigStream, configuration);
    this.healthChecker = new HealthChecker("PinLaterRedis");
    for (RedisPools redisPools : shardMap.values()) {
        this.healthChecker.addServer(redisPools.getHost(), redisPools.getPort(),
                new RedisHeartBeater(new JedisClientHelper(), redisPools.getMonitorRedisPool()),
                configuration.getInt("REDIS_HEALTH_CHECK_CONSECUTIVE_FAILURES", 6),
                configuration.getInt("REDIS_HEALTH_CHECK_CONSECUTIVE_SUCCESSES", 6),
                configuration.getInt("REDIS_HEALTH_CHECK_PING_INTERVAL_SECONDS", 5),
                true); // is live initially
    }

    // Start the JobQueueMonitor scheduled task.
    final int delaySeconds = configuration.getInt("BACKEND_MONITOR_THREAD_DELAY_SECONDS");
    ScheduledExecutorService service = Executors.newSingleThreadScheduledExecutor(
            new ThreadFactoryBuilder().setDaemon(true).setNameFormat("RedisJobQueueMonitor-%d").build());
    service.scheduleWithFixedDelay(new RedisQueueMonitor(shardMap, configuration, healthChecker),
            // Randomize initial delay to prevent all servers from running GC at the same time.
            delaySeconds + RANDOM.nextInt(delaySeconds), delaySeconds, TimeUnit.SECONDS);

    // Load queue names into memory. Silently catch exceptions to avoid failure in initialization.
    // If queue names are not loaded at this time, they will be retried upon requests.
    try {
        reloadQueueNames();
    } catch (Exception e) {
        Stats.incr("init-queuenames-failure");
        LOG.error("Failed to load queue names upon initialization.", e);
    }

    // Call Base class's initialization function to initialize the futurePool and dequeue
    // semaphoreMap.
    initialize();
}
From source file:com.uber.stream.kafka.chaperone.collector.reporter.DbAuditReporter.java
private DbAuditReporter(int queueSize, long timeBucketIntervalInSec, int reportFreqMsgCount,
        int reportFreqIntervalSec, boolean combineMetricsAmongHosts, String dbUser, String dbPass,
        String dbUrl, String dataTableName, String offsetTableName, int dbRetentionInHr,
        boolean enableRemoveOldRecord) {
    super(queueSize, timeBucketIntervalInSec, reportFreqMsgCount, reportFreqIntervalSec,
            combineMetricsAmongHosts);

    ds = new BasicDataSource();
    ds.setDriverClassName("com.mysql.jdbc.Driver");
    ds.setUsername(dbUser);
    ds.setPassword(dbPass);
    ds.setUrl(dbUrl);

    REMOVED_RECORDS_COUNTER = Metrics.getRegistry().meter(getType() + ".auditReporter.removedRecordsNumber");
    INSERTED_RECORDS_COUNTER = Metrics.getRegistry().meter(getType() + ".auditReporter.insertedRecordsNumber");
    UPDATED_RECORDS_COUNTER = Metrics.getRegistry().meter(getType() + ".auditReporter.updatedRecordsNumber");
    FAILED_TO_REMOVE_COUNTER = Metrics.getRegistry().meter(getType() + ".auditReporter.failedToRemoveNumber");
    DB_REPORT_LATENCY_TIMER = Metrics.getRegistry().timer(getType() + ".auditReporter.dbReportLatencyMs");

    Metrics.getRegistry().register(getType() + ".auditReporter.latestTSSeenLastInsert", new Gauge<Long>() {
        @Override
        public Long getValue() {
            long ret = latestTSSeenLastInsert;
            latestTSSeenLastInsert = 0;
            return ret;
        }
    });

    Metrics.getRegistry().register(getType() + ".auditReporter.earliestTSSeenLastInsert", new Gauge<Long>() {
        @Override
        public Long getValue() {
            long ret = earliestTSSeenLastInsert;
            earliestTSSeenLastInsert = System.currentTimeMillis();
            return ret;
        }
    });

    cronExecutor = Executors.newSingleThreadScheduledExecutor(
            new ThreadFactoryBuilder().setNameFormat(getType() + "-cron-executor-%d").build());

    auditDbRetentionMs = TimeUnit.HOURS.toMillis(dbRetentionInHr);
    this.dataTableName = dataTableName;
    this.offsetTableName = offsetTableName;
    this.enableRemoveOldRecord = enableRemoveOldRecord;

    logger.info("Try to create dataTable={} and offsetTable={}", dataTableName, offsetTableName);
    maybeCreateTable(CREATE_DATA_TABLE_SQL, dataTableName);
    maybeCreateTable(CREATE_OFFSET_TABLE_SQL, offsetTableName);
}
From source file:org.apache.nifi.ldap.tenants.LdapUserGroupProvider.java
@Override
public void initialize(final UserGroupProviderInitializationContext initializationContext)
        throws AuthorizerCreationException {
    ldapSync = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() {
        final ThreadFactory factory = Executors.defaultThreadFactory();

        @Override
        public Thread newThread(Runnable r) {
            final Thread thread = factory.newThread(r);
            thread.setName(String.format("%s (%s) - background sync thread", getClass().getSimpleName(),
                    initializationContext.getIdentifier()));
            return thread;
        }
    });
}
From source file:org.apache.pulsar.broker.loadbalance.impl.ModularLoadManagerImpl.java
/**
 * Initializes fields which do not depend on PulsarService. initialize(PulsarService) should subsequently be called.
 */
public ModularLoadManagerImpl() {
    brokerCandidateCache = new HashSet<>();
    brokerToNamespaceToBundleRange = new HashMap<>();
    defaultStats = new NamespaceBundleStats();
    filterPipeline = new ArrayList<>();
    loadData = new LoadData();
    loadSheddingPipeline = new ArrayList<>();
    loadSheddingPipeline.add(new OverloadShedder(conf));
    preallocatedBundleToBroker = new ConcurrentHashMap<>();
    scheduler = Executors.newSingleThreadScheduledExecutor(
            new DefaultThreadFactory("pulsar-modular-load-manager"));
}
From source file:com.arpnetworking.tsdcore.sinks.PeriodicStatisticsSink.java
@SuppressWarnings("unused") // Invoked reflectively from Builder private PeriodicStatisticsSink(final Builder builder) { this(builder, Executors .newSingleThreadScheduledExecutor((runnable) -> new Thread(runnable, "PeriodStatisticsSink"))); }