Example usage for java.util.concurrent Executors newScheduledThreadPool

Introduction

This page shows example usages of java.util.concurrent Executors.newScheduledThreadPool, collected from open-source projects.

Prototype

public static ScheduledExecutorService newScheduledThreadPool(int corePoolSize) 

Document

Creates a thread pool that can schedule commands to run after a given delay, or to execute periodically.
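
Quick Example

Before the project excerpts below, here is a minimal self-contained sketch of the scheduling styles the pool supports; the class name, tasks, and timings are illustrative only.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ScheduledPoolDemo {
    public static void main(String[] args) throws InterruptedException {
        // Two threads are plenty for this demo's scheduled tasks
        ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(2);

        // Run once, one second from now
        scheduler.schedule(() -> System.out.println("one-shot task"), 1, TimeUnit.SECONDS);

        // Fixed rate measures the period from the start of each run;
        // fixed delay measures it from the end of the previous run
        scheduler.scheduleAtFixedRate(() -> System.out.println("fixed rate"), 0, 500, TimeUnit.MILLISECONDS);
        scheduler.scheduleWithFixedDelay(() -> System.out.println("fixed delay"), 0, 500, TimeUnit.MILLISECONDS);

        Thread.sleep(2000);
        scheduler.shutdown(); // stop accepting new tasks; in-flight runs may finish
    }
}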

Usage

From source file: org.nema.medical.mint.server.ServerConfig.java

private void setUpDICOM2MINT() throws IOException, URISyntaxException {
    if (!enableProcessor()) {
        return;
    }
    dcm2MintExecutor = Executors.newScheduledThreadPool(2);
    //Create an instance of the Directory Processing Class
    final ProcessImportDir importProcessor = new ProcessImportDir(storageRootDir(), serverURI(), useXMLNotGPB(),
            deletePhysicalFiles(), forceCreate(), binaryInlineThreshold());
    final Runnable checkResponsesTask = new Runnable() {
        public void run() {
            try {
                importProcessor.handleResponses();
                importProcessor.handleSends();
            } catch (final Throwable e) {
                System.err.println("An exception occurred while uploading to the server:");
                e.printStackTrace();
            }
        }
    };
    dcm2MintExecutor.scheduleWithFixedDelay(checkResponsesTask, 1, 1, TimeUnit.SECONDS);

    final Runnable dirTraverseTask = new Runnable() {
        public void run() {
            try {
                importProcessor.processDir();
            } catch (final Throwable e) {
                System.err.println("An exception occurred while processing files:");
                e.printStackTrace();
            }
        }
    };
    dcm2MintExecutor.scheduleWithFixedDelay(dirTraverseTask, 20, 3, TimeUnit.SECONDS);
}
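
The excerpt sets up the executor, but its teardown happens outside this snippet. A hedged sketch of what an orderly shutdown of dcm2MintExecutor could look like (the method name tearDownDICOM2MINT is hypothetical, not taken from the project):

private void tearDownDICOM2MINT() {
    if (dcm2MintExecutor == null) {
        return;
    }
    dcm2MintExecutor.shutdown(); // stop scheduling further runs
    try {
        if (!dcm2MintExecutor.awaitTermination(10, TimeUnit.SECONDS)) {
            dcm2MintExecutor.shutdownNow(); // interrupt tasks still in flight
        }
    } catch (InterruptedException e) {
        dcm2MintExecutor.shutdownNow();
        Thread.currentThread().interrupt(); // preserve the interrupt status
    }
}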

From source file: pt.lsts.neptus.comm.iridium.IridiumManager.java

public synchronized void start() {
    if (service != null)
        stop();

    ImcMsgManager.getManager().registerBusListener(this);
    service = Executors.newScheduledThreadPool(1);
    service.scheduleAtFixedRate(pollMessages, 0, 5, TimeUnit.MINUTES);
}
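
start() first calls stop() when a service already exists, but stop() is not part of this excerpt. A plausible counterpart, assuming ImcMsgManager exposes an unregister method symmetric to registerBusListener (an assumption, not verified against the project):

public synchronized void stop() {
    if (service != null) {
        service.shutdownNow(); // cancels the 5-minute pollMessages schedule
        service = null;
    }
    // Assumed inverse of the registerBusListener(this) call in start()
    ImcMsgManager.getManager().unregisterBusListener(this);
}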

From source file: oz.hadoop.yarn.test.cluster.MiniYarnCluster.java

/**
 * @param clusterName the name of the cluster; any '$' characters are stripped
 * @param numNodeManagers the number of node managers to start
 */
public MiniYarnCluster(String clusterName, int numNodeManagers) {
    super(clusterName.replace("$", ""));
    this.resourceManager = new UnsecureResourceManager();
    this.serviceStartExecutor = Executors.newCachedThreadPool();
    this.serviceStartMonitoringExecutor = Executors
            .newScheduledThreadPool(Runtime.getRuntime().availableProcessors() / 2);
    this.appMasters = new ConcurrentHashMap<ApplicationAttemptId, Long>(16, 0.75f, 2);
    this.numLocalDirs = 1;
    this.numLogDirs = 1;
    this.prepareScriptExecutionEnv(clusterName);
    nodeManagers = new NodeManager[numNodeManagers];
}

From source file: hivemall.mix.server.MixServer.java

public void start() throws CertificateException, SSLException, InterruptedException {
    // Configure SSL.
    final SslContext sslCtx;
    if (ssl) {
        SelfSignedCertificate ssc = new SelfSignedCertificate();
        sslCtx = SslContext.newServerContext(ssc.certificate(), ssc.privateKey());
    } else {
        sslCtx = null;
    }

    // configure metrics
    ScheduledExecutorService metricCollector = Executors.newScheduledThreadPool(1);
    MixServerMetrics metrics = new MixServerMetrics();
    ThroughputCounter throughputCounter = new ThroughputCounter(metricCollector, 5000L, metrics);
    if (jmx) {// register mbean
        MetricsRegistry.registerMBeans(metrics, port);
    }

    // configure initializer
    SessionStore sessionStore = new SessionStore();
    MixServerHandler msgHandler = new MixServerHandler(sessionStore, syncThreshold, scale);
    MixServerInitializer initializer = new MixServerInitializer(msgHandler, throughputCounter, sslCtx);

    Runnable cleanSessionTask = new IdleSessionSweeper(sessionStore, sessionTTLinSec * 1000L);
    ScheduledExecutorService idleSessionChecker = Executors.newScheduledThreadPool(1);
    try {
        // start idle session sweeper
        idleSessionChecker.scheduleAtFixedRate(cleanSessionTask, sessionTTLinSec + 10L, sweepIntervalInSec,
                TimeUnit.SECONDS);
        // accept connections
        acceptConnections(initializer, port, numWorkers);
    } finally {
        // release threads
        idleSessionChecker.shutdownNow();
        if (jmx) {
            MetricsRegistry.unregisterMBeans(port);
        }
        metricCollector.shutdownNow();
    }
}

From source file: sce.Main.java

private Main() {
    try {
        //load quartz.properties from src folder of this project
        prop = new Properties();
        prop.load(this.getClass().getResourceAsStream("quartz.properties"));

        //System.out.println(new java.io.File(".").getAbsolutePath());
        //create a new scheduler factory
        //schedFact = new StdSchedulerFactory();
        schedFact = new StdSchedulerFactory(prop);
        //schedFact = new StdSchedulerFactory("quartz.properties");

        //get the scheduler
        sched = schedFact.getScheduler();

        //start the scheduler
        sched.start();

        //start logging this scheduler's status to the database every n seconds, read from quartz.properties (schedulerLoggingPeriod);
        //logging stops if the scheduler is shut down with the shutdownScheduler method
        scheduledThreadPool = Executors.newScheduledThreadPool(1);
        SchedulerLoggerStatus logger = new SchedulerLoggerStatus(sched,
                prop.getProperty("org.quartz.dataSource.quartzDataSource.URL"),
                prop.getProperty("org.quartz.dataSource.quartzDataSource.user"),
                prop.getProperty("org.quartz.dataSource.quartzDataSource.password"));
        scheduledThreadPool.scheduleAtFixedRate(logger, 1,
                Integer.parseInt(prop.getProperty("schedulerLoggingPeriod")), TimeUnit.SECONDS);

    } catch (SchedulerException | IOException e) {
        e.printStackTrace(); // do not swallow startup failures silently
    }
}

From source file: com.talis.hadoop.rdf.merge.IndexMergeReducer.java

@Override
public void reduce(LongWritable key, Iterable<Text> value, final Context context)
        throws IOException, InterruptedException {
    Runnable reporter = new Runnable() {
        @Override
        public void run() {
            context.progress();
        }
    };
    ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
    ScheduledFuture<?> task = scheduler.scheduleAtFixedRate(reporter, 60, 60, TimeUnit.SECONDS);
    LOG.debug("Scheduled progress reporter, combining index shards");

    FileSystem shardsFs = null;
    for (Text remoteShard : value) {
        Path remote = new Path(remoteShard.toString());
        if (null == shardsFs) {
            shardsFs = FileSystem.get(remote.toUri(), context.getConfiguration());
        }
        LOG.debug("Copying shard from {} to {}", remote, localShards);
        shardsFs.copyToLocalFile(remote, localShards);
        LOG.debug("Copy complete");
    }

    Directory[] shards = getDirectories();
    LOG.debug("About to combine {} shards", shards.length);
    writer.addIndexesNoOptimize(shards);
    LOG.debug("Combined index built, terminating reporter");
    task.cancel(true);
    scheduler.shutdown(); // release the reporter thread once the merge is done
}

From source file: com.janrain.backplane.config.BackplaneConfig.java

private Pair<String, ExecutorService> createPingTask() {
    final String label = "redis/jedis";
    ScheduledExecutorService ping = Executors.newScheduledThreadPool(1);
    ping.scheduleWithFixedDelay(new Runnable() {
        @Override
        public void run() {
            com.janrain.redis.Redis.getInstance().ping(label);
        }
    }, 30, 10, TimeUnit.SECONDS);
    return new Pair<String, ExecutorService>(label, ping);
}

From source file: ai.grakn.engine.backgroundtasks.distributed.Scheduler.java

public Scheduler open() throws Exception {
    if (OPENED.compareAndSet(false, true)) {
        // Init task storage
        stateStorage = new GraknStateStorage();

        // Kafka listener
        consumer = kafkaConsumer(SCHEDULERS_GROUP);
        consumer.subscribe(Collections.singletonList(NEW_TASKS_TOPIC), new RebalanceListener(consumer));

        // Kafka writer
        producer = kafkaProducer();

        // ZooKeeper client
        zkStorage = SynchronizedStateStorage.getInstance();

        waitToClose = new CountDownLatch(1);

        schedulingService = Executors.newScheduledThreadPool(1);

        LOG.debug("Scheduler started");
    } else {
        LOG.error("Scheduled already opened!");
    }

    return this;
}

From source file: com.ebay.myriad.Main.java

private void initTerminatorService(MyriadConfiguration cfg, Environment env, Injector injector) {
    LOGGER.info("Initializing Terminator");
    terminatorService = Executors.newScheduledThreadPool(1);
    terminatorService.scheduleAtFixedRate(injector.getInstance(TaskTerminator.class), 100, 2000,
            TimeUnit.MILLISECONDS);
}

From source file: cn.vko.cache.dao.ha.FailoverHotSwapDataSourceCreator.java

@Override
public DataSource createHADataSource(DataSourceDescriptor descriptor) throws Exception {
    DataSource activeDataSource = descriptor.getTarget();
    DataSource standbyDataSource = descriptor.getStandby();
    if (activeDataSource == null && standbyDataSource == null) {
        throw new IllegalArgumentException("must have at least one data source active.");
    }
    if (activeDataSource == null || standbyDataSource == null) {
        logger.warn("only one data source is available for use, so no HA support.");
        if (activeDataSource == null) {
            return standbyDataSource;
        }
        return activeDataSource;
    }

    HotSwappableTargetSource targetSource = new HotSwappableTargetSource(activeDataSource);
    ProxyFactory pf = new ProxyFactory();
    pf.setInterfaces(new Class[] { DataSource.class });
    pf.setTargetSource(targetSource);

    if (isPositiveFailoverEnable()) {
        DataSource targetDetectorDataSource = descriptor.getTarget();
        DataSource standbyDetectorDataSource = descriptor.getStandby();
        if (targetDetectorDataSource == null || standbyDetectorDataSource == null) {
            throw new IllegalArgumentException(
                    "targetDetectorDataSource or standbyDetectorDataSource can't be null if positive failover is enabled.");
        }
        // 1. create active monitoring job for failover event
        ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
        ExecutorService jobExecutor = Executors.newFixedThreadPool(1);
        jobExecutorRegistry.add(jobExecutor);
        FailoverMonitorJob job = new FailoverMonitorJob(jobExecutor);
        //    1.1  inject dependencies
        job.setHotSwapTargetSource(targetSource);
        job.setMasterDataSource(activeDataSource);
        job.setStandbyDataSource(standbyDataSource);
        job.setMasterDetectorDataSource(targetDetectorDataSource);
        job.setStandbyDetectorDataSource(standbyDetectorDataSource);
        job.setCurrentDetectorDataSource(targetDetectorDataSource);
        job.setDetectingRequestTimeout(getDetectingTimeoutThreshold());
        job.setDetectingSQL(getDetectingSql());
        job.setRecheckInterval(recheckInterval);
        job.setRecheckTimes(recheckTimes);
        //    1.2  start scheduling and keep reference for canceling and shutdown
        ScheduledFuture<?> future = scheduler.scheduleWithFixedDelay(job, initialDelay, monitorPeriod,
                TimeUnit.MILLISECONDS);
        schedulerFutures.put(future, scheduler);
    }

    if (isPassiveFailoverEnable()) {
        // 2. create data source proxy with passive event advice
        PassiveEventHotSwappableAdvice advice = new PassiveEventHotSwappableAdvice();
        advice.setRetryInterval(recheckInterval);
        advice.setRetryTimes(recheckTimes);
        advice.setDetectingSql(detectingSql);
        advice.setTargetSource(targetSource);
        advice.setMainDataSource(activeDataSource);
        advice.setStandbyDataSource(standbyDataSource);
        pf.addAdvice(advice);
    }

    return (DataSource) pf.getProxy();
}
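
The comment at step 1.2 says the future and scheduler are kept "for canceling and shutdown"; the cleanup itself is outside the excerpt. A hedged sketch of what it could look like, using the schedulerFutures map and jobExecutorRegistry list populated above (the destroy() name is hypothetical):

public void destroy() {
    for (Map.Entry<ScheduledFuture<?>, ScheduledExecutorService> entry : schedulerFutures.entrySet()) {
        entry.getKey().cancel(true);    // stop the failover monitor job
        entry.getValue().shutdownNow(); // release the scheduler's thread
    }
    for (ExecutorService jobExecutor : jobExecutorRegistry) {
        jobExecutor.shutdownNow();      // release the detector job threads
    }
}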