Example usage for java.util.concurrent Executors newScheduledThreadPool

Introduction

This page collects example usages of java.util.concurrent Executors.newScheduledThreadPool.

Prototype

public static ScheduledExecutorService newScheduledThreadPool(int corePoolSize, ThreadFactory threadFactory) 

Document

Creates a thread pool that can schedule commands to run after a given delay, or to execute periodically.
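Before the project examples below, here is a minimal, self-contained sketch of this overload (the class name, thread names, and delays are illustrative, not taken from any of the sources below). The ThreadFactory argument controls how the pool's threads are created and is typically used to name them and mark them as daemons:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class NewScheduledThreadPoolExample {
    public static void main(String[] args) throws InterruptedException {
        AtomicInteger counter = new AtomicInteger();
        // two core threads; the factory names them and marks them as daemons
        ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(2, r -> {
            Thread t = new Thread(r, "example-scheduler-" + counter.incrementAndGet());
            t.setDaemon(true);
            return t;
        });

        // run once after a one-second delay
        scheduler.schedule(() -> System.out.println("delayed task"), 1, TimeUnit.SECONDS);

        // run every 500 ms, starting immediately
        scheduler.scheduleAtFixedRate(() -> System.out.println("periodic task"), 0, 500,
                TimeUnit.MILLISECONDS);

        Thread.sleep(2000); // let a few periodic runs happen
        scheduler.shutdown();
    }
}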

Usage

From source file:org.nmdp.service.epitope.dropwizard.EpitopeServiceApplication.java

/**
 * Dropwizard service runner.  Application resources are registered here.
 */
@Override
public void runService(final EpitopeServiceConfiguration configuration, final Environment environment)
        throws Exception {

    Injector injector = Guice.createInjector(
            new ConfigurationModule(ConfigurationBindings.class, configuration), new LocalServiceModule(),
            new ResourceModule(), new AbstractModule() {
                @Override
                protected void configure() {
                    DBI dbi = new DBIFactory().build(environment, configuration.getDataSourceFactory(),
                            "sqlite");
                    bind(DBI.class).toInstance(dbi);
                }
            });

    environment.getObjectMapper().enable(SerializationFeature.INDENT_OUTPUT)
            .setSerializationInclusion(Include.NON_NULL)
            .enable(DeserializationFeature.ACCEPT_SINGLE_VALUE_AS_ARRAY);

    // todo: generalize dependency graph (latches?)
    //Runnable initializers = serial(
    //       parallel(
    //          () -> injector.getInstance(GGroupInitializer.class).loadGGroups(),
    //          () -> injector.getInstance(AlleleCodeInitializer.class).loadAlleleCodes(),
    //          serial(
    //             () -> injector.getInstance(AlleleInitializer.class).loadAlleles()),
    //             () -> injector.getInstance(ImgtImmuneGroupInitializer.class).loadAlleleScores()),
    //       parallel(
    //          () -> injector.getInstance(EpitopeService.class).buildMaps(),
    //          () -> injector.getInstance(FrequencyService.class).buildFrequencyMap(),
    //          () -> injector.getInstance(DbiAlleleCodeResolver.class).buildAlleleCodeMap()));

    Runnable initializers = serial(() -> injector.getInstance(GGroupInitializer.class).loadGGroups(),
            () -> injector.getInstance(AlleleCodeInitializer.class).loadAlleleCodes(),
            () -> injector.getInstance(AlleleInitializer.class).loadAlleles(),
            () -> injector.getInstance(ImmuneGroupInitializer.class).loadImmuneGroups(),
            () -> injector.getInstance(EpitopeService.class).buildAlleleGroupMaps(),
            () -> injector.getInstance(FrequencyService.class).buildFrequencyMap());
    //                 () -> injector.getInstance(DbiAlleleCodeResolver.class).buildAlleleCodeMap());

    long refreshMillis = injector.getInstance(Key.get(Long.class, RefreshMillis.class));

    environment.lifecycle().manage(new Managed() {
        ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1, r -> {
            Thread t = new Thread(r, "InitializerThread");
            t.setDaemon(true);
            return t;
        });

        @Override
        public void stop() throws Exception {
            scheduler.shutdownNow();
        }

        @Override
        public void start() throws Exception {
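            // run the initializers once and block until they complete, then refresh on a fixed schedule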
            Future<?> init = scheduler.submit(initializers);
            init.get();
            scheduler.scheduleAtFixedRate(initializers, refreshMillis, refreshMillis, MILLISECONDS);
        }
    });

    final AlleleResource alleleResource = injector.getInstance(AlleleResource.class);
    environment.jersey().register(alleleResource);

    final GroupResource groupResource = injector.getInstance(GroupResource.class);
    environment.jersey().register(groupResource);

    final MatchResource matchResource = injector.getInstance(MatchResource.class);
    environment.jersey().register(matchResource);

    environment.jersey().register(new org.nmdp.service.epitope.resource.impl.ExceptionMapper());

    // eriktodo: multibinder for health checks
    final GlClientHealthCheck glClientHealthCheck = injector.getInstance(GlClientHealthCheck.class);
    environment.healthChecks().register("glClient", glClientHealthCheck);
}

From source file:com.github.mrstampy.gameboot.concurrent.GameBootConcurrentConfiguration.java

/**
 * Scheduled executor service.
 *
 * @return the scheduled executor service
 */
@Bean(name = GAME_BOOT_SCHEDULED_EXECUTOR)
public ScheduledExecutorService scheduledExecutorService() {
    String name = isEmpty(schedulerName) ? "GameBoot Scheduled Executor" : schedulerName;

    GameBootThreadFactory factory = new GameBootThreadFactory(name);

    return Executors.newScheduledThreadPool(schedulerPoolSize, factory);
}

From source file:org.apache.storm.localizer.AsyncLocalizer.java

@VisibleForTesting
AsyncLocalizer(Map<String, Object> conf, AdvancedFSOps ops, String baseDir,
        AtomicReference<Map<Long, LocalAssignment>> currAssignment,
        Map<Integer, LocalAssignment> portToAssignments) throws IOException {

    this.conf = conf;
    isLocalMode = ConfigUtils.isLocalMode(conf);
    fsOps = ops;
    localBaseDir = baseDir;
    // default cache size 10 GB, converted to bytes
    cacheTargetSize = ObjectReader
            .getInt(conf.get(DaemonConfig.SUPERVISOR_LOCALIZER_CACHE_TARGET_SIZE_MB), 10 * 1024)
            .longValue() << 20;
    // default 30 seconds. (we cache the size so it is cheap to do)
    cacheCleanupPeriod = ObjectReader
            .getInt(conf.get(DaemonConfig.SUPERVISOR_LOCALIZER_CACHE_CLEANUP_INTERVAL_MS), 30 * 1000)
            .longValue();

    // if needed, we could make the update thread pool size configurable
    int threadPoolSize = ObjectReader.getInt(conf.get(DaemonConfig.SUPERVISOR_BLOBSTORE_DOWNLOAD_THREAD_COUNT),
            5);
    blobDownloadRetries = ObjectReader.getInt(conf.get(DaemonConfig.SUPERVISOR_BLOBSTORE_DOWNLOAD_MAX_RETRIES),
            3);

    execService = Executors.newScheduledThreadPool(threadPoolSize,
            new ThreadFactoryBuilder().setNameFormat("AsyncLocalizer Executor - %d").build());
    reconstructLocalizedResources();

    symlinksDisabled = (boolean) conf.getOrDefault(Config.DISABLE_SYMLINKS, false);
    blobPending = new HashMap<>();
    this.currAssignment = currAssignment;

    recoverBlobReferences(portToAssignments);
}

From source file:org.helios.netty.jmx.MetricCollector.java

/**
 * Creates a new MetricCollector.
 * @param period The period of collection
 */
private MetricCollector(long period) {
    super();
    haveNioMXBean = ManagementFactory.getPlatformMBeanServer().isRegistered(directNio);
    this.period = period;
    try {
        ManagementFactory.getPlatformMBeanServer().registerMBean(this, OBJECT_NAME);
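        // use a two-thread pool of named daemon threads so the collector never blocks JVM shutdown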
        scheduler = (ScheduledThreadPoolExecutor) Executors.newScheduledThreadPool(2, new ThreadFactory() {
            public Thread newThread(Runnable r) {
                Thread t = new Thread(r,
                        OBJECT_NAME.getKeyProperty("service") + "Thread#" + serial.incrementAndGet());
                t.setDaemon(true);
                return t;
            }
        });
        initMetricNames();
        scheduler.schedule(this, period, TimeUnit.MILLISECONDS);
        log.info("Started MetricCollector with a period of " + period + " ms.");
    } catch (Exception e) {
        throw new RuntimeException("Failed to create MetricCollector", e);
    }
}

From source file:org.onosproject.store.consistent.impl.DistributedLeadershipManager.java

@Activate
public void activate() {
    leaderMap = storageService.<String, NodeId>consistentMapBuilder().withName("onos-topic-leaders")
            .withSerializer(SERIALIZER).withPartitionsDisabled().build();
    candidateMap = storageService.<String, List<NodeId>>consistentMapBuilder().withName("onos-topic-candidates")
            .withSerializer(SERIALIZER).withPartitionsDisabled().build();

    leaderMap.addListener(event -> {
        log.debug("Received {}", event);
        LeadershipEvent.Type leadershipEventType = null;
        if (event.type() == MapEvent.Type.INSERT || event.type() == MapEvent.Type.UPDATE) {
            leadershipEventType = LeadershipEvent.Type.LEADER_ELECTED;
        } else if (event.type() == MapEvent.Type.REMOVE) {
            leadershipEventType = LeadershipEvent.Type.LEADER_BOOTED;
        }
        onLeadershipEvent(new LeadershipEvent(leadershipEventType, new Leadership(event.key(),
                event.value().value(), event.value().version(), event.value().creationTime())));
    });

    candidateMap.addListener(event -> {
        log.debug("Received {}", event);
        if (event.type() != MapEvent.Type.INSERT && event.type() != MapEvent.Type.UPDATE) {
            log.error("Entries must not be removed from candidate map");
            return;
        }
        onLeadershipEvent(new LeadershipEvent(LeadershipEvent.Type.CANDIDATES_CHANGED, new Leadership(
                event.key(), event.value().value(), event.value().version(), event.value().creationTime())));
    });

    localNodeId = clusterService.getLocalNode().id();

    electionRunner = Executors
            .newSingleThreadScheduledExecutor(groupedThreads("onos/store/leadership", "election-runner"));
    lockExecutor = Executors.newScheduledThreadPool(4,
            groupedThreads("onos/store/leadership", "election-thread-%d"));
    staleLeadershipPurgeExecutor = Executors.newSingleThreadScheduledExecutor(
            groupedThreads("onos/store/leadership", "stale-leadership-evictor"));
    leadershipRefresher = Executors
            .newSingleThreadScheduledExecutor(groupedThreads("onos/store/leadership", "refresh-thread"));

    clusterService.addListener(clusterEventListener);

    electionRunner.scheduleWithFixedDelay(this::electLeaders, 0, DELAY_BETWEEN_LEADER_LOCK_ATTEMPTS_SEC,
            TimeUnit.SECONDS);

    leadershipRefresher.scheduleWithFixedDelay(this::refreshLeaderBoard, 0, LEADERSHIP_REFRESH_INTERVAL_SEC,
            TimeUnit.SECONDS);

    listenerRegistry = new ListenerRegistry<>();
    eventDispatcher.addSink(LeadershipEvent.class, listenerRegistry);

    log.info("Started");
}

From source file:com.addthis.hydra.data.tree.concurrent.ConcurrentTree.java

ConcurrentTree(File root, int numDeletionThreads, int cleanQSize, int maxCacheSize, int maxPageSize,
        PageFactory factory) throws Exception {
    LessFiles.initDirectory(root);
    this.root = root;
    long start = System.currentTimeMillis();

    // setup metering
    meter = new Meter<>(METERTREE.values());
    for (METERTREE m : METERTREE.values()) {
        meter.addCountMetric(m, m.toString());
    }

    // create meter logging thread
    if (TreeCommonParameters.meterLogging > 0) {
        logger = new MeterFileLogger(this, root, "tree-metrics", TreeCommonParameters.meterLogging,
                TreeCommonParameters.meterLogLines);
    } else {
        logger = null;
    }
    source = new PageDB.Builder<>(root, ConcurrentTreeNode.class, maxPageSize, maxCacheSize)
            .pageFactory(factory).build();
    source.setCacheMem(TreeCommonParameters.maxCacheMem);
    source.setPageMem(TreeCommonParameters.maxPageMem);
    source.setMemSampleInterval(TreeCommonParameters.memSample);
    // create cache
    cache = new MediatedEvictionConcurrentHashMap.Builder<CacheKey, ConcurrentTreeNode>()
            .mediator(new CacheMediator(source)).maximumWeightedCapacity(cleanQSize).build();

    // get stored next db id
    idFile = new File(root, "nextID");
    if (idFile.exists() && idFile.isFile() && idFile.length() > 0) {
        nextDBID = new AtomicLong(Long.parseLong(LessBytes.toString(LessFiles.read(idFile))));
    } else {
        nextDBID = new AtomicLong(1);
    }

    // get tree root
    ConcurrentTreeNode dummyRoot = ConcurrentTreeNode.getTreeRoot(this);
    treeRootNode = dummyRoot.getOrCreateEditableNode("root");
    treeTrashNode = dummyRoot.getOrCreateEditableNode("trash");
    treeTrashNode.requireNodeDB();
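    // dedicated scheduled pool for background deletion; each thread runs one
    // recurring deletion task, with start times staggered by 1 ms below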
    deletionThreadPool = Executors.newScheduledThreadPool(numDeletionThreads,
            new NamedThreadFactory(scope + "-deletion-", true));

    for (int i = 0; i < numDeletionThreads; i++) {
        deletionThreadPool
                .scheduleAtFixedRate(
                        new ConcurrentTreeDeletionTask(this, closed::get,
                                LoggerFactory
                                        .getLogger(ConcurrentTreeDeletionTask.class.getName() + ".Background")),
                        i, deletionThreadSleepMillis, TimeUnit.MILLISECONDS);
    }

    long openTime = System.currentTimeMillis() - start;
    log.info("dir={} root={} trash={} cache={} nextdb={} openms={}", root, treeRootNode, treeTrashNode,
            TreeCommonParameters.cleanQMax, nextDBID, openTime);
}

From source file:com.netflix.astyanax.connectionpool.impl.ConnectionPoolConfigurationImpl.java

@Override
public void initialize() {
    if (partitioner == null) {
        try {
            partitioner = (Partitioner) Class.forName(DEFAULT_PARTITIONER_CLASS).newInstance();
        } catch (Throwable t) {
            throw new RuntimeException("Can't instantiate default partitioner " + DEFAULT_PARTITIONER_CLASS, t);
        }
    }

    if (maintainanceExecutor == null) {
        maintainanceExecutor = Executors.newScheduledThreadPool(DEFAULT_MAINTAINANCE_THREAD_COUNT,
                new ThreadFactoryBuilder().setDaemon(true).build());
        bOwnMaintainanceExecutor = true;
    }
    if (reconnectExecutor == null) {
        reconnectExecutor = Executors.newScheduledThreadPool(DEFAULT_RECONNECT_THREAD_COUNT,
                new ThreadFactoryBuilder().setDaemon(true).build());
        bOwnReconnectExecutor = true;
    }
}

From source file:de.blizzy.documentr.context.ContextConfig.java

@Bean(destroyMethod = "shutdown")
public ListeningExecutorService taskExecutor() {
    ThreadFactory threadFactory = new ThreadFactoryBuilder().setNameFormat("Task Executor (%d)").build(); //$NON-NLS-1$
    ExecutorService executorService = Executors.newScheduledThreadPool(DocumentrConstants.TASK_EXECUTOR_THREADS,
            threadFactory);
    return MoreExecutors.listeningDecorator(executorService);
}

From source file:com.amazonaws.services.kinesis.leases.impl.LeaseCoordinator.java

/**
 * Start background LeaseHolder and LeaseTaker threads.
 * @throws ProvisionedThroughputException If we can't talk to DynamoDB due to insufficient capacity.
 * @throws InvalidStateException If the lease table doesn't exist
 * @throws DependencyException If we encounter an exception talking to DynamoDB
 */
public void start() throws DependencyException, InvalidStateException, ProvisionedThroughputException {
    leaseRenewer.initialize();

    // 2 because we know we'll have at most 2 concurrent tasks at a time.
    leaseCoordinatorThreadPool = Executors.newScheduledThreadPool(2, LEASE_COORDINATOR_THREAD_FACTORY);

    // Taker runs with fixed DELAY because we want it to run slower in the event of performance degradation.
    leaseCoordinatorThreadPool.scheduleWithFixedDelay(new TakerRunnable(), 0L, takerIntervalMillis,
            TimeUnit.MILLISECONDS);
    // Renewer runs at fixed INTERVAL because we want it to run at the same rate in the event of degradation.
    leaseCoordinatorThreadPool.scheduleAtFixedRate(new RenewerRunnable(), 0L, renewerIntervalMillis,
            TimeUnit.MILLISECONDS);
    running = true;
}

From source file:org.mobicents.servlet.restcomm.sms.smpp.SmppService.java

private void initializeSmppConnections() {
    Configuration smppConfiguration = this.configuration.subset("smpp");

    List<Object> smppConnections = smppConfiguration.getList("connections.connection.name");

    int smppConnecsSize = smppConnections.size();
    if (smppConnecsSize == 0) {
        logger.warning("No SMPP Connections defined!");
        return;
    }

    for (int count = 0; count < smppConnecsSize; count++) {
        String name = smppConfiguration.getString("connections.connection(" + count + ").name");
        String systemId = smppConfiguration.getString("connections.connection(" + count + ").systemid");
        String peerIp = smppConfiguration.getString("connections.connection(" + count + ").peerip");
        int peerPort = smppConfiguration.getInt("connections.connection(" + count + ").peerport");
        SmppBindType bindtype = SmppBindType
                .valueOf(smppConfiguration.getString("connections.connection(" + count + ").bindtype"));

        if (bindtype == null) {
            logger.warning("Bindtype for SMPP name=" + name + " is not specified. Using default TRANSCEIVER");
        }

        String password = smppConfiguration.getString("connections.connection(" + count + ").password");
        String systemType = smppConfiguration.getString("connections.connection(" + count + ").systemtype");

        byte interfaceVersion = smppConfiguration
                .getByte("connections.connection(" + count + ").interfaceversion");

        byte ton = smppConfiguration.getByte("connections.connection(" + count + ").ton");
        byte npi = smppConfiguration.getByte("connections.connection(" + count + ").npi");
        String range = smppConfiguration.getString("connections.connection(" + count + ").range");

        Address address = null;
        if (ton != -1 && npi != -1 && range != null) {
            address = new Address(ton, npi, range);
        }

        int windowSize = smppConfiguration.getInt("connections.connection(" + count + ").windowsize");

        long windowWaitTimeout = smppConfiguration
                .getLong("connections.connection(" + count + ").windowwaittimeout");

        long connectTimeout = smppConfiguration.getLong("connections.connection(" + count + ").connecttimeout");
        long requestExpiryTimeout = smppConfiguration
                .getLong("connections.connection(" + count + ").requestexpirytimeout");
        long windowMonitorInterval = smppConfiguration
                .getLong("connections.connection(" + count + ").windowmonitorinterval");
        boolean logBytes = smppConfiguration.getBoolean("connections.connection(" + count + ").logbytes");
        boolean countersEnabled = smppConfiguration
                .getBoolean("connections.connection(" + count + ").countersenabled");

        long enquireLinkDelay = smppConfiguration
                .getLong("connections.connection(" + count + ").enquirelinkdelay");

        Smpp smpp = new Smpp(name, systemId, peerIp, peerPort, bindtype, password, systemType, interfaceVersion,
                address, connectTimeout, windowSize, windowWaitTimeout, requestExpiryTimeout,
                windowMonitorInterval, countersEnabled, logBytes, enquireLinkDelay);

        this.smppList.add(smpp);

        if (logger.isInfoEnabled()) {
            logger.info("creating new SMPP connection " + smpp);
        }

    }

    // for monitoring thread usage, it's preferable to create your own
    // instance of an executor and cast it to a ThreadPoolExecutor from
    // Executors.newCachedThreadPool(); this permits exposing things like
    // executor.getActiveCount() via JMX. There is probably no point renaming
    // the threads in a factory, since the underlying Netty framework does not
    // easily allow you to customize your thread names.
    this.executor = (ThreadPoolExecutor) Executors.newCachedThreadPool();

    // to enable automatic expiration of requests, a second scheduled
    // executor is required which is what a monitor task will be executed
    // with - this is probably a thread pool that can be shared between
    // all client bootstraps
    this.monitorExecutor = (ScheduledThreadPoolExecutor) Executors.newScheduledThreadPool(1,
            new ThreadFactory() {
                private AtomicInteger sequence = new AtomicInteger(0);

                @Override
                public Thread newThread(Runnable r) {
                    Thread t = new Thread(r);
                    t.setName("SmppServer-SessionWindowMonitorPool-" + sequence.getAndIncrement());
                    return t;
                }
            });

    // a single instance of a client bootstrap can technically be shared
    // between any sessions that are created (a session can go to any
    // number of different SMSCs) - each session created under a client
    // bootstrap will use the executor and monitorExecutor set in its
    // constructor - just be *very* careful with the "expectedSessions"
    // value to make sure it matches the actual number of total concurrent
    // open sessions you plan on handling - the underlying netty library
    // used for NIO sockets essentially uses this value as the max number of
    // threads it will ever use, despite the "max pool size", etc. set on
    // the executor passed in here

    // Setting expectedSessions to 25. Maybe this should be
    // configurable?
    this.clientBootstrap = new DefaultSmppClient(this.executor, 25, monitorExecutor);

    this.smppClientOpsThread = new SmppClientOpsThread(this.clientBootstrap, outboundInterface("udp").getPort(),
            smppMessageHandler);

    (new Thread(this.smppClientOpsThread)).start();

    for (Smpp smpp : this.smppList) {
        this.smppClientOpsThread.scheduleConnect(smpp);
    }

    if (logger.isInfoEnabled()) {
        logger.info("SMPP Service started");
    }
}