List of usage examples for java.util.concurrent.Executors.newSingleThreadScheduledExecutor
public static ScheduledExecutorService newSingleThreadScheduledExecutor(ThreadFactory threadFactory)
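Before the real-world examples, here is a minimal, self-contained sketch of the call itself. The thread name "example-scheduler", the class name, and the periodic "tick" task are illustrative only and do not come from any of the source files below; the point is simply that the supplied ThreadFactory names (and here daemonizes) the single worker thread on which every scheduled task runs.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;

public class SingleThreadScheduledExample {
    public static void main(String[] args) throws InterruptedException {
        // A factory that produces one named daemon thread; all scheduled tasks run on it
        ThreadFactory threadFactory = new ThreadFactory() {
            @Override
            public Thread newThread(Runnable r) {
                Thread t = new Thread(r, "example-scheduler");
                t.setDaemon(true);
                return t;
            }
        };

        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor(threadFactory);

        // Schedule a periodic task: first run after 1 second, then every 2 seconds
        scheduler.scheduleAtFixedRate(
                () -> System.out.println("tick on " + Thread.currentThread().getName()),
                1, 2, TimeUnit.SECONDS);

        Thread.sleep(7_000);   // let a few runs happen before shutting down
        scheduler.shutdown();  // stop accepting new tasks; queued tasks are discarded on daemon exit
    }
}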
From source file:com.datatorrent.contrib.hdht.HDHTReader.java
@Override
public void setup(OperatorContext context) {
    this.store.init();
    if (queryExecutor == null) {
        queryExecutor = Executors.newSingleThreadScheduledExecutor(
                new NameableThreadFactory(this.getClass().getSimpleName() + "-Reader"));
    }
}
From source file:net.centro.rtb.monitoringcenter.metrics.system.os.OperatingSystemMetricSet.java
public OperatingSystemMetricSet() {
    this.operatingSystemMXBean = ManagementFactory.getOperatingSystemMXBean();
    this.rootFilePath = new File("/");

    // Set up iowait retrieval job if needed
    Double ioWaitPercentage = fetchIoWaitPercentage();
    if (ioWaitPercentage != null) {
        this.ioWaitPercentageHolder = new AtomicReference<>(ioWaitPercentage);

        this.executorService = Executors.newSingleThreadScheduledExecutor(
                new ThreadFactoryBuilder().setNameFormat("OperatingSystemMetricSet-%d").build());
        this.executorService.scheduleWithFixedDelay(new Runnable() {
            @Override
            public void run() {
                Double ioWaitPercentage = fetchIoWaitPercentage();
                if (ioWaitPercentage != null) {
                    ioWaitPercentageHolder.set(ioWaitPercentage);
                }
            }
        }, 5, 5, TimeUnit.SECONDS);
    }

    // ----- Init and assign metrics -----
    this.metricsByNames = new HashMap<>();

    // Available everywhere
    this.availableLogicalProcessorsGauge = new Gauge<Integer>() {
        @Override public Integer getValue() { return operatingSystemMXBean.getAvailableProcessors(); }
    };
    metricsByNames.put("availableLogicalProcessors", availableLogicalProcessorsGauge);

    if (operatingSystemMXBean.getSystemLoadAverage() >= 0) { // Where available
        this.systemLoadAverageGauge = new Gauge<Double>() {
            @Override public Double getValue() { return operatingSystemMXBean.getSystemLoadAverage(); }
        };
        metricsByNames.put("systemLoadAverage", systemLoadAverageGauge);

        this.systemLoadAveragePerLogicalProcessorGauge = new Gauge<Double>() {
            @Override public Double getValue() {
                return operatingSystemMXBean.getSystemLoadAverage() / operatingSystemMXBean.getAvailableProcessors();
            }
        };
        metricsByNames.put("systemLoadAveragePerLogicalProcessor", systemLoadAveragePerLogicalProcessorGauge);
    }

    // Sun JVMs, incl. OpenJDK
    if (com.sun.management.OperatingSystemMXBean.class.isAssignableFrom(operatingSystemMXBean.getClass())) {
        final com.sun.management.OperatingSystemMXBean sunOsMxBean =
                com.sun.management.OperatingSystemMXBean.class.cast(operatingSystemMXBean);

        if (sunOsMxBean.getProcessCpuLoad() >= 0) {
            this.jvmCpuBusyPercentageGauge = new Gauge<Double>() {
                @Override public Double getValue() { return sunOsMxBean.getProcessCpuLoad() * 100; }
            };
            metricsByNames.put("jvmCpuBusyPercentage", jvmCpuBusyPercentageGauge);
        }

        if (sunOsMxBean.getSystemCpuLoad() >= 0) {
            this.systemCpuBusyPercentageGauge = new Gauge<Double>() {
                @Override public Double getValue() { return sunOsMxBean.getSystemCpuLoad() * 100; }
            };
            metricsByNames.put("systemCpuBusyPercentage", systemCpuBusyPercentageGauge);
        }

        if (sunOsMxBean.getCommittedVirtualMemorySize() >= 0) {
            this.committedVirtualMemorySizeInBytesGauge = new Gauge<Long>() {
                @Override public Long getValue() { return sunOsMxBean.getCommittedVirtualMemorySize(); }
            };
            metricsByNames.put("committedVirtualMemorySizeInBytes", committedVirtualMemorySizeInBytesGauge);
        }

        // Physical Memory
        String physicalMemoryNamespace = "physicalMemory";

        this.totalPhysicalMemorySizeInBytesGauge = new Gauge<Long>() {
            @Override public Long getValue() { return sunOsMxBean.getTotalPhysicalMemorySize(); }
        };
        metricsByNames.put(MetricNamingUtil.join(physicalMemoryNamespace, "totalInBytes"),
                totalPhysicalMemorySizeInBytesGauge);

        this.freePhysicalMemorySizeInBytesGauge = new Gauge<Long>() {
            @Override public Long getValue() { return sunOsMxBean.getFreePhysicalMemorySize(); }
        };
        metricsByNames.put(MetricNamingUtil.join(physicalMemoryNamespace, "freeInBytes"),
                freePhysicalMemorySizeInBytesGauge);

        this.usedPhysicalMemoryPercentageGauge = new Gauge<Double>() {
            @Override public Double getValue() {
                long totalPhysicalMemorySize = sunOsMxBean.getTotalPhysicalMemorySize();
                if (totalPhysicalMemorySize == 0) {
                    return 0.0;
                }
                long usedPhysicalMemorySize = totalPhysicalMemorySize - sunOsMxBean.getFreePhysicalMemorySize();
                return Double.valueOf(usedPhysicalMemorySize) / totalPhysicalMemorySize * 100;
            }
        };
        metricsByNames.put(MetricNamingUtil.join(physicalMemoryNamespace, "usedPercentage"),
                usedPhysicalMemoryPercentageGauge);

        // Swap Space
        String swapSpaceNamespace = "swapSpace";

        this.totalSwapSpaceSizeInBytesGauge = new Gauge<Long>() {
            @Override public Long getValue() { return sunOsMxBean.getTotalSwapSpaceSize(); }
        };
        metricsByNames.put(MetricNamingUtil.join(swapSpaceNamespace, "totalInBytes"), totalSwapSpaceSizeInBytesGauge);

        this.freeSwapSpaceSizeInBytesGauge = new Gauge<Long>() {
            @Override public Long getValue() { return sunOsMxBean.getFreeSwapSpaceSize(); }
        };
        metricsByNames.put(MetricNamingUtil.join(swapSpaceNamespace, "freeInBytes"), freeSwapSpaceSizeInBytesGauge);

        this.usedSwapSpacePercentageGauge = new Gauge<Double>() {
            @Override public Double getValue() {
                long totalSwapSpaceSize = sunOsMxBean.getTotalSwapSpaceSize();
                if (totalSwapSpaceSize == 0) {
                    return 0.0;
                }
                long usedSwapSpaceSize = totalSwapSpaceSize - sunOsMxBean.getFreeSwapSpaceSize();
                return Double.valueOf(usedSwapSpaceSize) / totalSwapSpaceSize * 100;
            }
        };
        metricsByNames.put(MetricNamingUtil.join(swapSpaceNamespace, "usedPercentage"), usedSwapSpacePercentageGauge);
    }

    // File descriptors (e.g., sockets)
    String fileDescriptorsNamespace = "fileDescriptors";
    if (UnixOperatingSystemMXBean.class.isAssignableFrom(operatingSystemMXBean.getClass())) {
        final UnixOperatingSystemMXBean unixOsMxBean = UnixOperatingSystemMXBean.class.cast(operatingSystemMXBean);

        this.maxFileDescriptorsGauge = new Gauge<Long>() {
            @Override public Long getValue() { return unixOsMxBean.getMaxFileDescriptorCount(); }
        };
        metricsByNames.put(MetricNamingUtil.join(fileDescriptorsNamespace, "max"), maxFileDescriptorsGauge);

        this.openFileDescriptorsGauge = new Gauge<Long>() {
            @Override public Long getValue() { return unixOsMxBean.getOpenFileDescriptorCount(); }
        };
        metricsByNames.put(MetricNamingUtil.join(fileDescriptorsNamespace, "open"), openFileDescriptorsGauge);

        this.usedFileDescriptorsPercentageGauge = new Gauge<Double>() {
            @Override public Double getValue() {
                long maxFileDescriptors = unixOsMxBean.getMaxFileDescriptorCount();
                if (maxFileDescriptors == 0) {
                    return 0.0;
                }
                return Double.valueOf(unixOsMxBean.getOpenFileDescriptorCount()) / maxFileDescriptors * 100;
            }
        };
        metricsByNames.put(MetricNamingUtil.join(fileDescriptorsNamespace, "usedPercentage"),
                usedFileDescriptorsPercentageGauge);
    }

    // Disk space
    String diskSpaceNamespace = "diskSpace";
    if (rootFilePath.getTotalSpace() > 0) {
        this.totalDiskSpaceInBytesGauge = new Gauge<Long>() {
            @Override public Long getValue() { return rootFilePath.getTotalSpace(); }
        };
        metricsByNames.put(MetricNamingUtil.join(diskSpaceNamespace, "totalInBytes"), totalDiskSpaceInBytesGauge);

        this.freeDiskSpaceInBytesGauge = new Gauge<Long>() {
            @Override public Long getValue() { return rootFilePath.getFreeSpace(); }
        };
        metricsByNames.put(MetricNamingUtil.join(diskSpaceNamespace, "freeInBytes"), freeDiskSpaceInBytesGauge);

        this.usedDiskSpacePercentageGauge = new Gauge<Double>() {
            @Override public Double getValue() {
                long totalDiskSpace = rootFilePath.getTotalSpace();
                if (totalDiskSpace == 0) {
                    return 0.0;
                }
                long usedDiskSpace = totalDiskSpace - rootFilePath.getFreeSpace();
                return Double.valueOf(usedDiskSpace) / totalDiskSpace * 100;
            }
        };
        metricsByNames.put(MetricNamingUtil.join(diskSpaceNamespace, "usedPercentage"), usedDiskSpacePercentageGauge);
    }

    // CPU IO Wait
    if (ioWaitPercentageHolder != null) {
        this.ioWaitPercentageGauge = new Gauge<Double>() {
            @Override public Double getValue() { return ioWaitPercentageHolder.get(); }
        };
        metricsByNames.put("ioWaitPercentage", ioWaitPercentageGauge);
    }

    this.shutdown = new AtomicBoolean(false);
}
From source file:org.apache.bookkeeper.bookie.LedgerStorageCheckpointTest.java
@Before
public void setUp() throws Exception {
    LOG.info("Setting up test {}", getClass());
    PowerMockito.mockStatic(Executors.class);
    try {
        // start zookeeper service
        startZKCluster();
    } catch (Exception e) {
        LOG.error("Error setting up", e);
        throw e;
    }

    ScheduledExecutorService scheduledExecutorService = PowerMockito.mock(ScheduledExecutorService.class);
    executorController = new MockExecutorController().controlSubmit(scheduledExecutorService)
            .controlScheduleAtFixedRate(scheduledExecutorService, 10);
    PowerMockito.when(scheduledExecutorService.awaitTermination(anyLong(), any(TimeUnit.class)))
            .thenReturn(true);
    PowerMockito.when(Executors.newSingleThreadScheduledExecutor(any())).thenReturn(scheduledExecutorService);
}
From source file:org.apache.s4.core.window.AbstractSlidingWindowPE.java
private AbstractSlidingWindowPE(App app, long slotDuration, TimeUnit timeUnit, int numSlots,
        SlotFactory<T> slotFactory, long slotCapacity) {
    super(app);
    this.numSlots = numSlots;
    this.slotFactory = slotFactory;
    this.slotCapacity = slotCapacity;
    if (slotDuration > 0L) {
        slotDurationInMilliseconds = TimeUnit.MILLISECONDS.convert(slotDuration, timeUnit);
        ThreadFactory threadFactory = new ThreadFactoryBuilder().setDaemon(true)
                .setNameFormat("SlidingWindow-" + getClass().getSimpleName()).build();
        windowingTimerService = Executors.newSingleThreadScheduledExecutor(threadFactory);
    } else {
        slotDurationInMilliseconds = 0;
        windowingTimerService = null;
    }
}
From source file:com.offbynull.portmapper.upnpigd.UpnpIgdController.java
/**
 * Constructs a UPNP-IGD controller.
 * @param selfAddress address of this machine
 * @param controlUrl control URL
 * @param serviceType service type
 * @param listener event listener
 * @throws NullPointerException if any argument other than {@code listener} is {@code null}
 */
public UpnpIgdController(InetAddress selfAddress, URI controlUrl, String serviceType,
        final UpnpIgdControllerListener listener) {
    Validate.notNull(selfAddress);
    Validate.notNull(controlUrl);
    Validate.notNull(serviceType);
    this.selfAddress = selfAddress;
    this.controlUrl = controlUrl;
    this.serviceType = serviceType;
    activePortsLock = new ReentrantLock();
    activePorts = new HashMap<>();
    scheduledPortTester = Executors.newSingleThreadScheduledExecutor(
            new BasicThreadFactory.Builder().daemon(false).namingPattern("upnp-port-tester").build());
    if (listener != null) {
        scheduledPortTester.scheduleAtFixedRate(new Runnable() {
            @Override
            public void run() {
                // get a random port mapping
                List<PortMappingInfo> ports;
                activePortsLock.lock();
                try {
                    ports = new ArrayList<>(activePorts.values());
                } finally {
                    activePortsLock.unlock();
                }
                if (ports.isEmpty()) {
                    return;
                }
                Random random = new Random();
                PortMappingInfo oldPmi = ports.get(random.nextInt(ports.size()));

                // check to see if it's still active
                boolean mappingFailed;
                try {
                    PortMappingInfo newPmi = getMappingDetails(oldPmi.getExternalPort(), oldPmi.getPortType());
                    mappingFailed = !newPmi.getInternalClient().equals(UpnpIgdController.this.selfAddress)
                            || newPmi.getInternalPort() != oldPmi.getInternalPort()
                            || newPmi.getPortType() != oldPmi.getPortType();
                } catch (Exception e) {
                    mappingFailed = true;
                }

                // if it isn't, check that the user didn't remove it while we were testing it, then notify
                if (mappingFailed) {
                    activePortsLock.lock();
                    try {
                        PortMappingInfo testPmi = activePorts.get(oldPmi.getExternalPort());
                        if (testPmi == null) {
                            return;
                        }
                        if (testPmi.getInternalClient().equals(UpnpIgdController.this.selfAddress)
                                && testPmi.getInternalPort() == oldPmi.getInternalPort()
                                && testPmi.getPortType() == oldPmi.getPortType()) {
                            activePorts.remove(oldPmi.externalPort);
                            listener.mappingExpired(oldPmi);
                        }
                    } finally {
                        activePortsLock.unlock();
                    }
                }
            }
        }, RANDOM_PORT_TEST_SLEEP, RANDOM_PORT_TEST_SLEEP, TimeUnit.SECONDS);
    }
}
From source file:com.linkedin.pinot.controller.helix.SegmentStatusChecker.java
private void startThread() {
    LOGGER.info("Starting segment status checker");
    if (_executorService == null) {
        _executorService = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() {
            @Override
            public Thread newThread(Runnable runnable) {
                Thread thread = new Thread(runnable);
                thread.setName("SegStatChecker");
                return thread;
            }
        });
        // Set up an executor that executes segment status tasks periodically
        _executorService.scheduleWithFixedDelay(new Runnable() {
            @Override
            public void run() {
                try {
                    runSegmentMetrics();
                } catch (Exception e) {
                    LOGGER.warn("Caught exception while running segment status checker", e);
                }
            }
        }, SegmentCheckerDefaultIntervalSeconds, _segmentStatusIntervalSeconds, TimeUnit.SECONDS);
    } else {
        LOGGER.warn("SegmentStatusChecker already running. Attempt to start a duplicate thread");
    }
}
From source file:com.uber.stream.kafka.mirrormaker.controller.core.OffsetMonitor.java
public OffsetMonitor(final HelixMirrorMakerManager helixMirrorMakerManager, ControllerConf controllerConf) {
    this.numOffsetThread = controllerConf.getNumOffsetThread();
    this.helixMirrorMakerManager = helixMirrorMakerManager;
    this.srcBrokerList = new ArrayList<>();
    this.offsetZkString = controllerConf.getConsumerCommitZkPath().isEmpty()
            ? controllerConf.getSrcKafkaZkPath()
            : controllerConf.getConsumerCommitZkPath();
    this.srcZkString = controllerConf.getSrcKafkaZkPath();
    // disable monitor if SRC_KAFKA_ZK or GROUP_ID is not set
    if (StringUtils.isEmpty(controllerConf.getSrcKafkaZkPath()) || controllerConf.getGroupId().isEmpty()) {
        logger.info("Consumer GROUP_ID is not set. Offset manager is disabled");
        this.refreshIntervalInSec = 0;
    } else {
        this.refreshIntervalInSec = controllerConf.getOffsetRefreshIntervalInSec();
    }

    this.consumerOffsetPath = "/consumers/" + controllerConf.getGroupId() + "/offsets/";

    this.refreshExecutor = Executors.newSingleThreadScheduledExecutor(
            new ThreadFactoryBuilder().setNameFormat("topic-list-cron-%d").setDaemon(true).build());
    this.cronExecutor = new ThreadPoolExecutor(numOffsetThread, numOffsetThread, 0L, TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<>(controllerConf.getBlockingQueueSize()),
            new ThreadFactoryBuilder().setNameFormat("topic-offset-cron-%d").setDaemon(true).build());

    this.topicList = new ArrayList<>();
    this.brokerConsumer = new ConcurrentHashMap<>();
    this.partitionLeader = new ConcurrentHashMap<>();
    this.topicPartitionToOffsetMap = new ConcurrentHashMap<>();
    this.noProgressMap = new ConcurrentHashMap<>();
}
From source file:org.apache.hadoop.yarn.server.sharedcachemanager.store.InMemorySCMStore.java
/**
 * The in-memory store bootstraps itself from the shared cache entries that
 * exist in HDFS.
 */
@Override
protected void serviceInit(Configuration conf) throws Exception {
    this.startTime = System.currentTimeMillis();
    this.initialDelayMin = getInitialDelay(conf);
    this.checkPeriodMin = getCheckPeriod(conf);
    this.stalenessMinutes = getStalenessPeriod(conf);
    bootstrap(conf);

    ThreadFactory tf = new ThreadFactoryBuilder().setNameFormat("InMemorySCMStore").build();
    scheduler = Executors.newSingleThreadScheduledExecutor(tf);

    super.serviceInit(conf);
}
From source file:tv.arte.resteventapi.core.scheduling.RestEventScheduledThreadPoolExecutorScheduler.java
/**
 * @see ScheduledThreadPoolExecutor#ScheduledThreadPoolExecutor(int, ThreadFactory, RejectedExecutionHandler)
 */
public RestEventScheduledThreadPoolExecutorScheduler(int corePoolSize, ThreadFactory threadFactory,
        RejectedExecutionHandler handler, Integer secondsBetweenRescheduling,
        Integer maxMinutesToPrefetchScheduledEvents) {
    if (secondsBetweenRescheduling != null) {
        if (secondsBetweenRescheduling < 0) {
            throw new RestEventApiRuntimeException(
                    "secondsBetweenRescheduling should be a positive integer value");
        }
        this.secondsBetweenRescheduling = secondsBetweenRescheduling;
    }

    if (maxMinutesToPrefetchScheduledEvents != null) {
        if (maxMinutesToPrefetchScheduledEvents < 0) {
            throw new RestEventApiRuntimeException(
                    "maxMinutesToPrefetchScheduledEvents should be a positive integer value");
        }
        this.maxMinutesToPrefetchScheduledEvents = maxMinutesToPrefetchScheduledEvents;
    }

    this.reschedulingExecutor = Executors.newSingleThreadScheduledExecutor(threadFactory);
    this.reschedulingExecutor.scheduleWithFixedDelay(new ReschedulingTask(), 30,
            this.secondsBetweenRescheduling, TimeUnit.SECONDS);

    this.restEventsScheduledExecutor = new ScheduledThreadPoolExecutor(corePoolSize, threadFactory, handler);
}
From source file:com.netflix.iep.http.RxHttp.java
/**
 * Setup the background tasks for cleaning up connections.
 */
@PostConstruct
public void start() {
    LOGGER.info("starting up backround cleanup threads");
    executor = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            Thread t = new Thread(r, "spectator-rxhttp-" + NEXT_THREAD_ID.getAndIncrement());
            t.setDaemon(true);
            return t;
        }
    });

    Runnable task = new Runnable() {
        @Override
        public void run() {
            try {
                LOGGER.debug("executing cleanup for {} clients", clients.size());
                for (Map.Entry<Server, HttpClient<ByteBuf, ByteBuf>> entry : clients.entrySet()) {
                    final Server s = entry.getKey();
                    if (s.isRegistered() && !serverRegistry.isStillAvailable(s)) {
                        LOGGER.debug("cleaning up client for {}", s);
                        clients.remove(s);
                        entry.getValue().shutdown();
                    }
                }
                LOGGER.debug("cleanup complete with {} clients remaining", clients.size());
            } catch (Exception e) {
                LOGGER.warn("connection cleanup task failed", e);
            }
        }
    };

    final long cleanupFreq = Spectator.config().getLong("spectator.http.cleanupFrequency", 60);
    executor.scheduleWithFixedDelay(task, 0L, cleanupFreq, TimeUnit.SECONDS);
}