Example usage for java.util.concurrent Executors newScheduledThreadPool

Introduction

On this page you can find example usage of java.util.concurrent Executors newScheduledThreadPool.

Prototype

public static ScheduledExecutorService newScheduledThreadPool(int corePoolSize, ThreadFactory threadFactory) 

Document

Creates a thread pool that can schedule commands to run after a given delay, or to execute periodically.
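Below is a minimal, self-contained sketch of this overload. The pool size, thread names and delays are illustrative and are not taken from the projects listed under Usage; the custom ThreadFactory simply names its threads and marks them as daemons.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class ScheduledPoolExample {
    public static void main(String[] args) throws InterruptedException {
        // A simple ThreadFactory that gives each thread a readable name and marks it as a daemon.
        ThreadFactory factory = new ThreadFactory() {
            private final AtomicInteger counter = new AtomicInteger();

            @Override
            public Thread newThread(Runnable r) {
                Thread t = new Thread(r, "example-scheduler-" + counter.incrementAndGet());
                t.setDaemon(true);
                return t;
            }
        };

        // Two core threads, all created by the factory above.
        ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(2, factory);

        // Run one task after a one-second delay and another every five seconds.
        scheduler.schedule(() -> System.out.println("delayed task"), 1, TimeUnit.SECONDS);
        scheduler.scheduleAtFixedRate(() -> System.out.println("periodic task"), 0, 5, TimeUnit.SECONDS);

        // The pool threads are daemons, so keep the JVM alive long enough to see some output.
        Thread.sleep(12_000);
        scheduler.shutdown();
    }
}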

Usage

From source file:org.commonjava.cartographer.CartographerCoreBuilder.java

public CartographerCore build() throws CartoDataException {
    // this has implications on how the maven components are built, below...so it has to happen first.
    if (this.sourceManager == null) {
        final SourceManagerImpl smi = new SourceManagerImpl();
        this.sourceManager = smi;
        withLocationExpander(smi);
        withLocationResolver(smi);
    }

    if (maven == null) {
        final List<Transport> transports = mavenBuilder.getEnabledTransports();
        if (transports == null || transports.isEmpty()) {
            withDefaultTransports();
        }

        try {
            mavenBuilder.initMissingComponents();
        } catch (final GalleyInitException e) {
            throw new CartoDataException("Failed to initialize missing Galley components: %s", e,
                    e.getMessage());
        }
    }

    // TODO: This needs to be replaced with a real implementation.
    if (events == null) {
        events = new NoOpCartoEventManager();
    }

    aggregatorThreads = aggregatorThreads < 2 ? 2 : aggregatorThreads;

    if (aggregatorExecutor == null) {
        aggregatorExecutor = Executors.newScheduledThreadPool(aggregatorThreads,
                new NamedThreadFactory("carto-aggregator", true, 8));
    }

    resolverThreads = resolverThreads < aggregatorThreads ? 5 * aggregatorThreads : resolverThreads;

    if (resolveExecutor == null) {
        resolveExecutor = Executors.newScheduledThreadPool(resolverThreads,
                new NamedThreadFactory("carto-graph", true, 8));
    }

    if (this.metadataScanners == null) {
        this.metadataScanners = new ArrayList<MetadataScanner>(
                Arrays.asList(new LicenseScanner(getPomReader()), new ScmUrlScanner(getPomReader())));
    }

    // TODO: Add some scanners.
    if (scannerSupport == null) {
        scannerSupport = new MetadataScannerSupport(metadataScanners);
    }

    if (this.depgraphPatchers == null) {
        this.depgraphPatchers = new ArrayList<DepgraphPatcher>();
    }

    if (patcherSupport == null) {
        this.patcherSupport = new PatcherSupport(
                this.depgraphPatchers.toArray(new DepgraphPatcher[this.depgraphPatchers.size()]));
    }

    if (mavenModelProcessor == null) {
        mavenModelProcessor = new MavenModelProcessor();
    }

    if (this.discoverer == null) {
        this.discoverer = new DiscovererImpl(mavenModelProcessor, getPomReader(), getArtifactManager(),
                patcherSupport, scannerSupport);
    }

    if (aggregator == null) {
        aggregator = new DefaultGraphAggregator(discoverer, aggregatorExecutor);
    }

    if (presetSelector == null) {
        presetSelector = new PresetSelector(
                Arrays.<PresetFactory>asList(new BuildRequirementProjectsFilterFactory(),
                        new ScopeWithEmbeddedProjectsFilterFactory(), new ScopedProjectFilterFactory()));
    }

    if (mutatorSelector == null) {
        mutatorSelector = new MutatorSelector(
                Arrays.asList(new ManagedDependencyGraphMutatorFactory(), new NoOpGraphMutatorFactory()));
    }

    if (dtoResolver == null) {
        dtoResolver = new RecipeResolver(getLocationResolver(), getLocationExpander(), sourceManager,
                getPomReader(), presetSelector, mutatorSelector);
    }

    withStandardObjectMapperModules();

    logger.debug("Object mapper: {}", objectMapper);

    final RelationshipGraphFactory graphFactory = new RelationshipGraphFactory(connectionFactory);

    MultiGraphCalculator calculator = new MultiGraphCalculator(graphFactory);

    GraphResolver resolver = new GraphResolver(calculator, sourceManager, discoverer, aggregator,
            getArtifactManager(), resolveExecutor, graphFactory, dtoResolver);

    final CalculationOps calculationOps = new CalculationOpsImpl(calculator, resolver);

    final ResolveOps resolveOps = new ResolveOpsImpl(sourceManager, discoverer, getArtifactManager(),
            resolveExecutor, dtoResolver, resolver);

    final GraphOps graphOps = new GraphOpsImpl(resolver);

    final GraphRenderingOps graphRenderingOps = new GraphRenderingOpsImpl(resolveOps, resolver,
            getLocationExpander(), dtoResolver);

    final MetadataOps metadataOps = new MetadataOpsImpl(getArtifactManager(), getPomReader(), scannerSupport,
            resolver, dtoResolver);

    try {
        return new CartographerCore(maven == null ? mavenBuilder.build() : maven, calculationOps, graphOps,
                graphRenderingOps, metadataOps, resolveOps, graphFactory, resolver, calculator, objectMapper);
    } catch (final GalleyInitException e) {
        throw new CartoDataException("Failed to build Galley Maven component: %s", e, e.getMessage());
    }
}

From source file:org.opennms.netmgt.dao.mock.MockEventIpcManager.java

ScheduledExecutorService getScheduler() {
    if (m_scheduler == null) {
        m_scheduler = Executors.newScheduledThreadPool(getNumSchedulerTheads(),
                new LogPreservingThreadFactory(getClass().getSimpleName(), getNumSchedulerTheads()));
    }
    return m_scheduler;
}

From source file:ox.softeng.burst.service.BurstService.java

private void createReportSchedulerService(Properties properties) {
    logger.debug("Creating new report scheduler");
    reportScheduler = new ReportScheduler(entityManagerFactory, properties);
    scheduleFrequency = Utils.convertToInteger("report.schedule.frequency",
            properties.getProperty("report.schedule.frequency"), 1);
    reportExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("scheduler"));
}

From source file:com.cloud.server.StatsCollector.java

private void init(Map<String, String> configs) {
    _executor = Executors.newScheduledThreadPool(6, new NamedThreadFactory("StatsCollector"));

    hostOutOfBandManagementStatsInterval = OutOfBandManagementService.SyncThreadInterval.value();
    hostStatsInterval = NumbersUtil.parseLong(configs.get("host.stats.interval"), 60000L);
    hostAndVmStatsInterval = NumbersUtil.parseLong(configs.get("vm.stats.interval"), 60000L);
    storageStatsInterval = NumbersUtil.parseLong(configs.get("storage.stats.interval"), 60000L);
    volumeStatsInterval = NumbersUtil.parseLong(configs.get("volume.stats.interval"), -1L);
    autoScaleStatsInterval = NumbersUtil.parseLong(configs.get("autoscale.stats.interval"), 60000L);
    vmDiskStatsInterval = NumbersUtil.parseInt(configs.get("vm.disk.stats.interval"), 0);

    /* URI to send statistics to. Currently only Graphite is supported */
    String externalStatsUri = configs.get("stats.output.uri");
    if (externalStatsUri != null && !externalStatsUri.equals("")) {
        try {
            URI uri = new URI(externalStatsUri);
            String scheme = uri.getScheme();

            try {
                externalStatsType = ExternalStatsProtocol.valueOf(scheme.toUpperCase());
            } catch (IllegalArgumentException e) {
                s_logger.info(scheme
                        + " is not a valid protocol for external statistics. No statistics will be send.");
            }

            if (!StringUtils.isEmpty(uri.getHost())) {
                externalStatsHost = uri.getHost();
            }

            externalStatsPort = uri.getPort();

            if (!StringUtils.isEmpty(uri.getPath())) {
                externalStatsPrefix = uri.getPath().substring(1);
            }

            /* Append a dot (.) to the prefix if it is set */
            if (!StringUtils.isEmpty(externalStatsPrefix)) {
                externalStatsPrefix += ".";
            } else {
                externalStatsPrefix = "";
            }

            externalStatsEnabled = true;
        } catch (URISyntaxException e) {
            s_logger.debug("Failed to parse external statistics URI: " + e.getMessage());
        }
    }

    if (hostStatsInterval > 0) {
        _executor.scheduleWithFixedDelay(new HostCollector(), 15000L, hostStatsInterval, TimeUnit.MILLISECONDS);
    }

    if (hostOutOfBandManagementStatsInterval > 0) {
        _executor.scheduleWithFixedDelay(new HostOutOfBandManagementStatsCollector(), 15000L,
                hostOutOfBandManagementStatsInterval, TimeUnit.MILLISECONDS);
    }

    if (hostAndVmStatsInterval > 0) {
        _executor.scheduleWithFixedDelay(new VmStatsCollector(), 15000L, hostAndVmStatsInterval,
                TimeUnit.MILLISECONDS);
    }

    if (storageStatsInterval > 0) {
        _executor.scheduleWithFixedDelay(new StorageCollector(), 15000L, storageStatsInterval,
                TimeUnit.MILLISECONDS);
    }

    if (autoScaleStatsInterval > 0) {
        _executor.scheduleWithFixedDelay(new AutoScaleMonitor(), 15000L, autoScaleStatsInterval,
                TimeUnit.MILLISECONDS);
    }

    if (vmDiskStatsInterval > 0) {
        if (vmDiskStatsInterval < 300)
            vmDiskStatsInterval = 300;
        _executor.scheduleAtFixedRate(new VmDiskStatsTask(), vmDiskStatsInterval, vmDiskStatsInterval,
                TimeUnit.SECONDS);
    }

    //Schedule disk stats update task
    _diskStatsUpdateExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("DiskStatsUpdater"));
    String aggregationRange = configs.get("usage.stats.job.aggregation.range");
    _usageAggregationRange = NumbersUtil.parseInt(aggregationRange, 1440);
    _usageTimeZone = configs.get("usage.aggregation.timezone");
    if (_usageTimeZone == null) {
        _usageTimeZone = "GMT";
    }
    TimeZone usageTimezone = TimeZone.getTimeZone(_usageTimeZone);
    Calendar cal = Calendar.getInstance(usageTimezone);
    cal.setTime(new Date());
    long endDate = 0;
    int HOURLY_TIME = 60;
    final int DAILY_TIME = 60 * 24;
    if (_usageAggregationRange == DAILY_TIME) {
        cal.set(Calendar.HOUR_OF_DAY, 0);
        cal.set(Calendar.MINUTE, 0);
        cal.set(Calendar.SECOND, 0);
        cal.set(Calendar.MILLISECOND, 0);
        cal.roll(Calendar.DAY_OF_YEAR, true);
        cal.add(Calendar.MILLISECOND, -1);
        endDate = cal.getTime().getTime();
        _dailyOrHourly = true;
    } else if (_usageAggregationRange == HOURLY_TIME) {
        cal.set(Calendar.MINUTE, 0);
        cal.set(Calendar.SECOND, 0);
        cal.set(Calendar.MILLISECOND, 0);
        cal.roll(Calendar.HOUR_OF_DAY, true);
        cal.add(Calendar.MILLISECOND, -1);
        endDate = cal.getTime().getTime();
        _dailyOrHourly = true;
    } else {
        endDate = cal.getTime().getTime();
        _dailyOrHourly = false;
    }
    if (_usageAggregationRange < UsageUtils.USAGE_AGGREGATION_RANGE_MIN) {
        s_logger.warn("Usage stats job aggregation range is to small, using the minimum value of "
                + UsageUtils.USAGE_AGGREGATION_RANGE_MIN);
        _usageAggregationRange = UsageUtils.USAGE_AGGREGATION_RANGE_MIN;
    }
    _diskStatsUpdateExecutor.scheduleAtFixedRate(new VmDiskStatsUpdaterTask(),
            (endDate - System.currentTimeMillis()), (_usageAggregationRange * 60 * 1000),
            TimeUnit.MILLISECONDS);

}

From source file:mondrian.olap.Util.java

/**
 * Creates a {@link ScheduledExecutorService} backed by a
 * thread pool with a fixed number of threads.
 * @param maxNbThreads Maximum number of concurrent threads.
 * @param name The name of the threads.
 * @return A preconfigured scheduled executor service.
 */
public static ScheduledExecutorService getScheduledExecutorService(final int maxNbThreads, final String name) {
    return Executors.newScheduledThreadPool(maxNbThreads, new ThreadFactory() {
        final AtomicInteger counter = new AtomicInteger(0);

        public Thread newThread(Runnable r) {
            final Thread thread = Executors.defaultThreadFactory().newThread(r);
            thread.setDaemon(true);
            thread.setName(name + '_' + counter.incrementAndGet());
            return thread;
        }
    });
}
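A hypothetical call site for the helper above (the pool size, thread-name prefix and task are illustrative):

ScheduledExecutorService pool = Util.getScheduledExecutorService(4, "mondrian-worker");
pool.scheduleWithFixedDelay(() -> System.out.println("housekeeping"), 1, 1, TimeUnit.MINUTES);

Because the factory above marks its threads as daemons, such a pool will not by itself keep the JVM running.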

From source file:com.all.download.manager.ScheduledExecutorServiceSingleton.java

public ScheduledExecutorService getInstance() {
    ScheduledExecutorService scheduledExecutorService = Executors.newScheduledThreadPool(THREAD_POOL_SIZE,
            new DownloaderThreadFactory());

    scheduledExecutorServiceList.add(scheduledExecutorService);

    return scheduledExecutorService;
}

From source file:com.turbospaces.api.AbstractSpaceConfiguration.java

/**
 * 1. initialize jchannel
 * 2. initialize conversion service
 * 3. initialize mapping context
 * 4. initialize kryo
 */
@Override
@SuppressWarnings("unchecked")
public void afterPropertiesSet() throws Exception {
    logger.info("Initializing JSpace configuration: group = {}", getGroup());

    if (getJChannel() == null) {
        ClassPathResource largeClusterCfg = new ClassPathResource("turbospaces-jgroups-udp.xml");
        InputStream inputStream = largeClusterCfg.getInputStream();
        setjChannel(new JChannel(inputStream));
        inputStream.close();
    }
    getJChannel().setDiscardOwnMessages(true);

    if (getMemoryManager() == null)
        setMemoryManager(new UnsafeMemoryManager());
    if (getMappingContext() == null)
        if (applicationContext != null)
            setMappingContext(applicationContext.getBean(AbstractMappingContext.class));

    if (getListeningExecutorService() == null)
        setExecutorService(Executors.newFixedThreadPool(1 << 6, new ThreadFactoryBuilder().setDaemon(false)
                .setNameFormat("jspace-execpool-thread-%s").build()));
    if (getScheduledExecutorService() == null)
        setScheduledExecutorService(Executors.newScheduledThreadPool(1 << 2, new ThreadFactoryBuilder()
                .setDaemon(true).setNameFormat("jspace-scheduledpool-thread-%s").build()));

    Preconditions.checkState(mappingContext != null, MAPPING_CONTEXT_IS_NOT_REGISTERED);

    Collection<BasicPersistentEntity> persistentEntities = mappingContext.getPersistentEntities();
    for (BasicPersistentEntity e : persistentEntities)
        boFor(e.getType());

    if (kryo == null)
        kryo = new DecoratedKryo();
    SpaceUtility.registerSpaceClasses(this, kryo);
}
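The Guava ThreadFactoryBuilder used above is a compact way to supply the ThreadFactory argument. The following standalone sketch shows the same pattern in isolation, assuming Guava is on the classpath; the class and method names are illustrative.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;

import com.google.common.util.concurrent.ThreadFactoryBuilder;

public final class DaemonSchedulers {
    // e.g. newDaemonScheduler(4, "jspace-scheduledpool-thread-%s")
    public static ScheduledExecutorService newDaemonScheduler(int poolSize, String nameFormat) {
        return Executors.newScheduledThreadPool(poolSize,
                new ThreadFactoryBuilder().setDaemon(true).setNameFormat(nameFormat).build());
    }
}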

From source file:org.geowebcache.diskquota.DiskQuotaMonitor.java

private ScheduledExecutorService createCleanUpExecutor() {

    final int numCleaningThreads = quotaConfig.getMaxConcurrentCleanUps();
    log.info("Setting up disk quota periodic enforcement task");
    CustomizableThreadFactory tf = new CustomizableThreadFactory("GWC DiskQuota clean up thread-");
    tf.setThreadPriority(1 + (Thread.MAX_PRIORITY - Thread.MIN_PRIORITY) / 5);

    ScheduledExecutorService executorService = Executors.newScheduledThreadPool(numCleaningThreads, tf);

    return executorService;
}

From source file:com.alibaba.otter.manager.biz.autokeeper.impl.AutoKeeperCollector.java

@Override
public void afterPropertiesSet() throws Exception {
    collectorExecutor = Executors.newScheduledThreadPool(singleSize,
            new NamedThreadFactory("collector-thread", true));
    startCollect();
}

From source file:edu.umass.cs.gigapaxos.SQLPaxosLogger.java

SQLPaxosLogger(int id, String strID, String dbPath, PaxosMessenger<?> messenger) {
    super(id, dbPath, messenger);
    this.strID = strID;
    GC = Executors.newScheduledThreadPool(2, new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            Thread thread = Executors.defaultThreadFactory().newThread(r);
            thread.setName(SQLPaxosLogger.class.getSimpleName() + ":" + strID);
            return thread;
        }
    }); // new Timer(strID);
    addDerbyLogger(this);
    this.journaler = new Journaler(this.logDirectory, this.strID/* this.myID */);
    this.deleteTmpJournalFiles();

    this.mapDB = USE_MAP_DB
            ? new MapDBContainer(DBMaker.fileDB(new File(this.getLogIndexDBPrefix())).make(),
                    DBMaker.memoryDB().transactionDisable().make())
            : null;

    Diskable<String, LogIndex> disk = new Diskable<String, LogIndex>() {

        @Override
        public Set<String> commit(Map<String, LogIndex> toCommit) throws IOException {
            return SQLPaxosLogger.this.pauseLogIndex(toCommit);
        }

        @Override
        public LogIndex restore(String key) throws IOException {
            return SQLPaxosLogger.this.unpauseLogIndex(key);
        }

        public String toString() {
            return MessageLogDiskMap.class.getSimpleName() + SQLPaxosLogger.this.strID;
        }

    };
    this.messageLog = USE_MAP_DB ? new MessageLogMapDB(this.mapDB.inMemory, this.mapDB.onDisk, disk)
            : USE_DISK_MAP ? new MessageLogDiskMap(disk) : new MessageLogPausable(disk);

    // will set up db, connection, tables, etc. as needed
    if (!initialize(true))
        throw new RuntimeException("Unable to initiate " + PaxosManager.class.getSimpleName() + " for " + id);
}