List of usage examples for java.util.concurrent.Executors.newScheduledThreadPool(int corePoolSize, ThreadFactory threadFactory)
public static ScheduledExecutorService newScheduledThreadPool(int corePoolSize, ThreadFactory threadFactory)
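Per the JDK documentation, this overload creates a pool of corePoolSize threads that can schedule commands to run after a delay or to execute periodically, and uses the supplied ThreadFactory whenever a new thread is needed. Before the project examples below, here is a minimal, self-contained sketch of the pattern most of them share: a small scheduled pool whose threads are created by a custom factory that names them and marks them as daemons. The thread-name prefix, period, and task are illustrative only and are not taken from any of the projects listed.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class HeartbeatSchedulerExample {

    public static void main(String[] args) throws InterruptedException {
        // Custom factory: each pool thread gets a recognisable name and daemon status,
        // mirroring the factories used in the project examples below.
        ThreadFactory factory = new ThreadFactory() {
            private final AtomicInteger counter = new AtomicInteger(0);

            @Override
            public Thread newThread(Runnable r) {
                Thread t = new Thread(r, "heartbeat-scheduler-" + counter.getAndIncrement());
                t.setDaemon(true);
                return t;
            }
        };

        // A single-threaded scheduled pool built from the custom factory.
        ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1, factory);

        // Periodic task: first run immediately, then every 30 seconds.
        scheduler.scheduleAtFixedRate(
                () -> System.out.println(Thread.currentThread().getName() + ": sending heartbeat"),
                0, 30, TimeUnit.SECONDS);

        // Let the first tick run, then shut the scheduler down.
        Thread.sleep(1000);
        scheduler.shutdown();
    }
}

Naming the threads (as the apikeys-refresher, TopNSender, and hosebird-client factories below do) makes the pool easy to identify in thread dumps, and marking them as daemons keeps the scheduler from blocking JVM shutdown.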
From source file:io.gravitee.gateway.services.apikeyscache.ApiKeysCacheService.java
@Override
protected void doStart() throws Exception {
    if (enabled) {
        super.doStart();
        LOGGER.info("Overriding API key repository implementation with cached API Key repository");
        DefaultListableBeanFactory beanFactory = (DefaultListableBeanFactory) ((ConfigurableApplicationContext) applicationContext
                .getParent()).getBeanFactory();
        this.apiKeyRepository = beanFactory.getBean(ApiKeyRepository.class);
        LOGGER.debug("Current API key repository implementation is {}", apiKeyRepository.getClass().getName());

        String[] beanNames = beanFactory.getBeanNamesForType(ApiKeyRepository.class);
        String oldBeanName = beanNames[0];
        beanFactory.destroySingleton(oldBeanName);

        LOGGER.debug("Register API key repository implementation {}", ApiKeyRepositoryWrapper.class.getName());
        beanFactory.registerSingleton(ApiKeyRepository.class.getName(),
                new ApiKeyRepositoryWrapper(this.apiKeyRepository, cache));

        eventManager.subscribeForEvents(this, ReactorEvent.class);

        executorService = Executors.newScheduledThreadPool(threads, new ThreadFactory() {
            private int counter = 0;
            private String prefix = "apikeys-refresher";

            @Override
            public Thread newThread(Runnable r) {
                return new Thread(r, prefix + '-' + counter++);
            }
        });
    }
}
From source file:com.twitter.hbc.ClientBuilder.java
public ClientBuilder() {
    enableGZip = true;
    name = "hosebird-client-" + clientNum.getAndIncrement();

    ThreadFactory threadFactory = new ThreadFactoryBuilder().setDaemon(true)
            .setNameFormat("hosebird-client-io-thread-%d").build();
    executorService = Executors.newSingleThreadExecutor(threadFactory);

    ThreadFactory rateTrackerThreadFactory = new ThreadFactoryBuilder().setDaemon(true)
            .setNameFormat("hosebird-client-rateTracker-thread-%d").build();
    ScheduledExecutorService scheduledExecutor = Executors.newScheduledThreadPool(1, rateTrackerThreadFactory);

    rateTracker = new BasicRateTracker(30000, 100, true, scheduledExecutor);
    reconnectionManager = new BasicReconnectionManager(5);
    socketTimeoutMillis = 60000;
    connectionTimeoutMillis = 4000;
    schemeRegistry = SchemeRegistryFactory.createDefault();
}
From source file:org.hyperic.hq.measurement.agent.server.TopNScheduler.java
private void createSender() {
    sender = Executors.newScheduledThreadPool(1, new ThreadFactory() {
        private final AtomicLong i = new AtomicLong(0);

        public Thread newThread(Runnable r) {
            return new Thread(r, "TopNSender" + i.getAndIncrement());
        }
    });

    sender.scheduleAtFixedRate(new Runnable() {
        public void run() {
            boolean success;
            List<TopReport> reports = new ArrayList<TopReport>();
            for (TopReport report : storage.<TopReport>getObjectsFromFolder(DATA_FOLDERNAME, MAX_BATCHSIZE)) {
                reports.add(report);
            }
            // If we don't have anything to send -- move along
            if (reports.isEmpty()) {
                log.debug("No TopN records were found in the storage");
                return;
            }
            log.debug("Sending " + reports.size() + " TopN entries " + "to server");
            success = false;
            try {
                TopNSendReport_args report = new TopNSendReport_args();
                if (agentToken == null) {
                    agentToken = storage.getValue(CommandsAPIInfo.PROP_AGENT_TOKEN);
                }
                report.setAgentToken(agentToken);
                report.setTopReports(reports);
                client.topNSendReport(report);
                success = true;
            } catch (AgentCallbackClientException exc) {
                log.error("Error sending TOPN data to server: " + exc.getMessage());
            }
            // delete the records we sent from the storage
            if (success) {
                List<String> filesToDelete = new ArrayList<String>();
                for (TopReport report : reports) {
                    filesToDelete.add(String.valueOf(report.getCreateTime()));
                }
                storage.deleteObjectsFromFolder(DATA_FOLDERNAME,
                        filesToDelete.toArray(new String[filesToDelete.size()]));
            }
        }
        // TimeUnit.MINUTE does not work on java5
    }, SEND_INTERVAL * 60, SEND_INTERVAL * 60, TimeUnit.SECONDS);
}
From source file:org.apache.stratos.throttling.agent.ThrottlingAgent.java
public ThrottlingAgent(BundleContext bundleContext) throws Exception {
    this.scheduler = Executors.newScheduledThreadPool(1, new ThrottlingAgentThreadFactory());
    this.throttlingInfoCache = new ThrottlingInfoCache();
    this.bundleContext = bundleContext;
}
From source file:com.openteach.diamond.network.waverider.slave.DefaultSlaveNode.java
@Override
public boolean init() {
    commandDispatcher.addCommandHandler(0L, new SlaveHeartbeatCommandHandler(this));
    netWorkClient = new DefaultNetWorkClient(config.getMasterAddress(), config.getPort());
    if (!netWorkClient.init()) {
        return false;
    }
    masterFailureMonitor = new DefaultMasterFailureMonitor(new DefaultMasterFailureHandler(this),
            MasterFailureMonitor.DEFAULT_FAILURE_MONITOR_INTERVAL,
            MasterFailureMonitor.DEFAULT_FAILURE_MONITOR_WAIT_MASTER_STATE_TIME_OUT);
    if (!masterFailureMonitor.init()) {
        return false;
    }
    commandDispatchThread = new Thread(new CommandDispatchTask(), SLAVE_COMMAND_DISPATCHE_THREAD_NAME);
    commandDispatchThread.setDaemon(true);
    // heart beat
    heartbeatScheduler = Executors.newScheduledThreadPool(1,
            new WaveriderThreadFactory(SLAVE_HEART_BEAT_THREAD_NAME_PREFIX, null, true));
    return true;
}
From source file:org.wso2.carbon.cluster.coordinator.rdbms.RDBMSCoordinationStrategy.java
public RDBMSCoordinationStrategy(DataSource datasource) {
    ThreadFactory namedThreadFactory = new ThreadFactoryBuilder().setNameFormat("RDBMSCoordinationStrategy-%d")
            .build();
    this.threadExecutor = Executors.newScheduledThreadPool(
            CoordinationStrategyConfiguration.getInstance().getRdbmsConfigs().get("heartbeatInterval"),
            namedThreadFactory);
    this.heartBeatInterval = CoordinationStrategyConfiguration.getInstance().getRdbmsConfigs()
            .get(CoordinationPropertyNames.RDBMS_BASED_COORDINATION_HEARTBEAT_INTERVAL);
    // Maximum age of a heartbeat. After this much time, the heartbeat is considered invalid and the node is
    // considered to have left the cluster.
    this.heartbeatMaxAge = heartBeatInterval * 2;
    this.localNodeId = generateRandomId();
    this.rdbmsMemberEventProcessor = new RDBMSMemberEventProcessor(localNodeId, datasource);
    this.communicationBusContext = new RDBMSCommunicationBusContextImpl(datasource);
}
From source file:org.apache.flink.runtime.filecache.FileCache.java
public FileCache(String[] tempDirectories) throws IOException {
    Preconditions.checkNotNull(tempDirectories);

    storageDirectories = new File[tempDirectories.length];
    for (int i = 0; i < tempDirectories.length; i++) {
        String cacheDirName = "flink-dist-cache-" + UUID.randomUUID().toString();
        storageDirectories[i] = new File(tempDirectories[i], cacheDirName);
        String path = storageDirectories[i].getAbsolutePath();

        if (storageDirectories[i].mkdirs()) {
            LOG.info("User file cache uses directory " + path);
        } else {
            LOG.error("User file cache cannot create directory " + path);
            // delete all other directories we created so far
            for (int k = 0; k < i; k++) {
                if (!storageDirectories[k].delete()) {
                    LOG.warn("User file cache cannot remove prior directory "
                            + storageDirectories[k].getAbsolutePath());
                }
            }
            throw new IOException("File cache cannot create temp storage directory: " + path);
        }
    }

    this.shutdownHook = createShutdownHook(this, LOG);
    this.entries = new HashMap<JobID, Map<String, Tuple4<Integer, File, Path, Future<Path>>>>();
    this.executorService = Executors.newScheduledThreadPool(10, new ExecutorThreadFactory("flink-file-cache"));
}
From source file:org.apache.hadoop.yarn.server.resourcemanager.security.RMAppSecurityManager.java
@Override
protected void serviceInit(Configuration conf) throws Exception {
    LOG.debug("Initializing RMAppSecurityManager");
    this.conf = conf;
    this.handler = rmContext.getDispatcher().getEventHandler();
    rmAppCertificateActions = RMAppSecurityActionsFactory.getInstance().getActor(conf);
    isRPCTLSEnabled = conf.getBoolean(CommonConfigurationKeys.IPC_SERVER_SSL_ENABLED,
            CommonConfigurationKeys.IPC_SERVER_SSL_ENABLED_DEFAULT);
    renewalExecutorService = Executors.newScheduledThreadPool(RENEWAL_EXECUTOR_SERVICE_POOL_SIZE,
            new ThreadFactoryBuilder().setDaemon(true).setNameFormat("RMApp Security Material Renewer #%d")
                    .build());
    for (RMAppSecurityHandler handler : securityHandlersMap.values()) {
        handler.init(conf);
    }
    super.serviceInit(conf);
}
From source file:edu.umass.cs.gigapaxos.FailureDetection.java
FailureDetection(NodeIDType id, InterfaceNIOTransport<NodeIDType, JSONObject> niot, String paxosLogFolder) {
    nioTransport = niot;
    myID = id;
    this.execpool = Executors.newScheduledThreadPool(1, new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            Thread thread = Executors.defaultThreadFactory().newThread(r);
            thread.setName(FailureDetection.class.getSimpleName() + myID);
            return thread;
        }
    });
    lastHeardFrom = new ConcurrentHashMap<NodeIDType, Long>();
    keepAliveTargets = new TreeSet<NodeIDType>();
    futures = new HashMap<NodeIDType, ScheduledFuture<PingTask>>();
    initialize(paxosLogFolder);
}
From source file:com.adaptris.core.RetryMessageErrorHandlerImp.java
@Override
public void start() throws CoreException {
    executor = Executors.newScheduledThreadPool(1, new ManagedThreadFactory(getClass().getSimpleName()));
    sweeper = executor.scheduleWithFixedDelay(new CleanupTask(), 100L, retryIntervalMs(), TimeUnit.MILLISECONDS);
    failAll = false;
    super.start();
}
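Two scheduling styles appear in the examples above: TopNScheduler uses scheduleAtFixedRate (successive runs start one period apart, measured from the start of each run), while RetryMessageErrorHandlerImp uses scheduleWithFixedDelay (the next run starts a fixed delay after the previous run finishes). The following minimal sketch contrasts the two; the task, labels, and timings are illustrative only and not drawn from any project above.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class FixedRateVsFixedDelay {

    // Returns a task that logs a labelled tick and then simulates 500 ms of work.
    static Runnable slowTask(String label) {
        return () -> {
            System.out.println(label + " tick at " + System.currentTimeMillis());
            try {
                Thread.sleep(500);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        };
    }

    public static void main(String[] args) throws InterruptedException {
        AtomicInteger counter = new AtomicInteger(0);
        ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(2,
                r -> new Thread(r, "demo-scheduler-" + counter.getAndIncrement()));

        // Fixed rate: runs are started every 1000 ms, regardless of how long each run takes
        // (runs queue up rather than overlap if one overruns the period).
        scheduler.scheduleAtFixedRate(slowTask("fixed-rate"), 0, 1000, TimeUnit.MILLISECONDS);

        // Fixed delay: the next run starts 1000 ms after the previous one finishes,
        // so with a 500 ms task the effective interval is roughly 1500 ms.
        scheduler.scheduleWithFixedDelay(slowTask("fixed-delay"), 0, 1000, TimeUnit.MILLISECONDS);

        Thread.sleep(5000);
        scheduler.shutdownNow();
    }
}

With the 500 ms task above, the fixed-rate job ticks roughly once per second while the fixed-delay job ticks roughly every 1.5 seconds, which is why fixed delay is the safer default when a run's duration is unpredictable.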