Example usage for java.util.concurrent ScheduledExecutorService scheduleAtFixedRate

Introduction

This page collects example usages of java.util.concurrent.ScheduledExecutorService.scheduleAtFixedRate from open-source projects.

Prototype

public ScheduledFuture<?> scheduleAtFixedRate(Runnable command, long initialDelay, long period, TimeUnit unit);

Documentation

Submits a periodic action that becomes enabled first after the given initial delay, and subsequently with the given period; that is, executions will commence after initialDelay, then initialDelay + period, then initialDelay + 2 * period, and so on.
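
Note that the period measures start-to-start time, and the same task is never executed concurrently with itself: if a run takes longer than the period, subsequent runs start late rather than overlapping. A minimal, self-contained sketch (class name and message are illustrative only):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

public class FixedRateExample {
    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();

        // First run after 1 second, then every 2 seconds, measured start-to-start.
        ScheduledFuture<?> handle = scheduler.scheduleAtFixedRate(
                () -> System.out.println("tick"), 1, 2, TimeUnit.SECONDS);

        Thread.sleep(10_000);
        handle.cancel(false); // false: let an in-flight run finish
        scheduler.shutdown();
    }
}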

Usage

From source file:org.apache.pulsar.common.stats.JvmMetrics.java

public JvmMetrics(ScheduledExecutorService executor, String componentName, JvmGCMetricsLogger gcLogger) {
    this.gcLogger = gcLogger;
    if (executor != null) {
        executor.scheduleAtFixedRate(gcLogger::refresh, 0, 1, TimeUnit.MINUTES);
}
    this.componentName = componentName;
}

From source file:be.vlaanderen.sesam.monitor.internal.util.ThreadPoolTaskScheduler.java

public ScheduledFuture<?> scheduleAtFixedRate(Runnable task, long period) {
    ScheduledExecutorService executor = getScheduledExecutor();
    try {
        return executor.scheduleAtFixedRate(errorHandlingTask(task, true), 0, period, TimeUnit.MILLISECONDS);
    } catch (RejectedExecutionException ex) {
        throw new TaskRejectedException("Executor [" + executor + "] did not accept task: " + task, ex);
    }
}
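
The errorHandlingTask wrapper above matters: under the ScheduledExecutorService contract, if any execution of a fixed-rate task throws, subsequent executions are suppressed and the failure is only reported through the returned ScheduledFuture. The snippet does not show errorHandlingTask itself, but a minimal sketch of what such a wrapper plausibly does is:

// Hypothetical sketch of an error-handling wrapper: catching Throwable keeps one
// failed run from silently cancelling the whole periodic schedule.
private Runnable errorHandlingTask(final Runnable task, final boolean isRepeating) {
    return new Runnable() {
        @Override
        public void run() {
            try {
                task.run();
            } catch (Throwable t) {
                // Log and swallow so the next scheduled run still happens.
                System.err.println("Scheduled task failed" + (isRepeating ? ", will retry" : "") + ": " + t);
            }
        }
    };
}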

From source file:com.netflix.curator.framework.imps.TestTempFramework.java

@Test
public void testInactivity() throws Exception {
    final CuratorTempFrameworkImpl client = (CuratorTempFrameworkImpl) CuratorFrameworkFactory.builder()
            .connectString(server.getConnectString()).retryPolicy(new RetryOneTime(1))
            .buildTemp(1, TimeUnit.SECONDS);
    try {
        ScheduledExecutorService service = Executors.newScheduledThreadPool(1);
        Runnable command = new Runnable() {
            @Override
            public void run() {
                client.updateLastAccess();
            }
        };
        // touch the client every 10 ms so the temp framework stays active during the transaction
        service.scheduleAtFixedRate(command, 10, 10, TimeUnit.MILLISECONDS);
        client.inTransaction().create().forPath("/foo", "data".getBytes()).and().commit();
        // stop the keep-alive and wait past the 1-second inactivity window
        service.shutdownNow();
        Thread.sleep(2000);

        Assert.assertNull(client.getCleanup());
        Assert.assertNull(client.getClient());
    } finally {
        IOUtils.closeQuietly(client);
    }
}

From source file:com.amazonaws.services.dynamodbv2.streamsadapter.functionals.CorrectnessTest.java

/**
 * This test spawns a thread to periodically write items to the source table. It shuts down and restarts the KCL
 * worker while writes are happening (to simulate the real-world situation of a worker dying and another taking its
 * place). There are two things being verified here:
 * 1. New KCL worker resumes from the checkpoint
 * 2. All stream records are processed
 *
 * @throws Exception
 */
@Test
public void workerFailureTest() throws Exception {
    LOG.info("Starting single shard KCL worker failure test.");

    KinesisClientLibConfiguration workerConfig = new KinesisClientLibConfiguration(leaseTable, streamId,
            credentials, KCL_WORKER_ID).withInitialPositionInStream(InitialPositionInStream.TRIM_HORIZON);

    startKCLWorker(workerConfig);

    // A thread that keeps writing to the table every 2 seconds
    ScheduledExecutorService loadGeneratorService = Executors.newSingleThreadScheduledExecutor();
    loadGeneratorService.scheduleAtFixedRate(new Runnable() {

        @Override
        public void run() {
            insertAndUpdateItems(1);
        }
    }, 0/* initialDelay */, 2/* period */, TimeUnit.SECONDS);

    while (recordProcessorFactory.getNumRecordsProcessed() < 10) {
        LOG.info("Sleep till first few records are processed");
        Thread.sleep(THREAD_SLEEP_2S);
    }

    shutDownKCLWorker();

    // Calculate number of records processed by first worker and also the number of processed-but-not-checkpointed
    // records, since checkpoint happens after every batch of 10 records
    int numRecordsProcessedByFirstWorker = recordProcessorFactory.getNumRecordsProcessed();
    int numRecordsNotCheckpointed = numRecordsProcessedByFirstWorker
            % ReplicatingRecordProcessor.CHECKPOINT_BATCH_SIZE;

    // Start a new worker
    startKCLWorker(workerConfig);

    // getNumRecordsProcessed() returns a negative value until the new record processor is initialized
    while (recordProcessorFactory.getNumRecordsProcessed() < 0) {
        LOG.info("Sleep till RecordProcessor is initialized");
        Thread.sleep(THREAD_SLEEP_2S);
    }

    loadGeneratorService.shutdown();

    if (!loadGeneratorService.awaitTermination(THREAD_SLEEP_5S, TimeUnit.MILLISECONDS)) {
        loadGeneratorService.shutdownNow();
    }

    int numStreamRecords = 2 * this.numItemsInSrcTable;
    int remainingRecordsToBeProcessed = numStreamRecords - numRecordsProcessedByFirstWorker
            + numRecordsNotCheckpointed;

    /*
     * The second worker must process at least remainingRecordsToBeProcessed
     * records so that everything is replicated to the destination table.
     * Thus, this should never end up as an infinite loop; if it does,
     * something else has gone wrong.
     */
    while (recordProcessorFactory.getNumRecordsProcessed() < remainingRecordsToBeProcessed) {
        LOG.info("Sleep till remaining records are processed");
        Thread.sleep(THREAD_SLEEP_2S);
    }

    shutDownKCLWorker();

    ScanResult srcTableScan = TestUtil.scanTable(dynamoDBClient, srcTable);
    ScanResult destTableScan = TestUtil.scanTable(dynamoDBClient, destTable);
    assertEquals(srcTableScan.getItems(), destTableScan.getItems());
}
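
The load generator above is stopped with the standard two-phase idiom: shutdown() stops new runs from being scheduled, awaitTermination() waits briefly for an in-flight run to finish, and shutdownNow() interrupts whatever is still running if the wait times out. Extracted as a reusable helper (a sketch; the name stopQuietly is illustrative):

// Two-phase shutdown for a scheduled executor, matching the test above.
static void stopQuietly(ScheduledExecutorService service, long timeoutMillis) throws InterruptedException {
    service.shutdown(); // no new runs will start
    if (!service.awaitTermination(timeoutMillis, TimeUnit.MILLISECONDS)) {
        service.shutdownNow(); // interrupt anything still running
    }
}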

From source file:org.apache.zeppelin.flink.sql.AbstractStreamSqlJob.java

public InterpreterResult run(String st) {
    try {
        checkLocalProperties(context.getLocalProperties());

        int parallelism = Integer
                .parseInt(context.getLocalProperties().getOrDefault("parallelism", defaultParallelism + ""));
        this.stEnv.getConfig().getConf().setInteger(TableConfigOptions.SQL_RESOURCE_DEFAULT_PARALLELISM,
                parallelism);

        Table table = stEnv.sqlQuery(st);
        this.schema = removeTimeAttributes(table.getSchema());
        checkTableSchema(schema);

        LOGGER.info("ResultTable Schema: " + this.schema);
        final DataType outputType = DataTypes.createRowTypeV2(schema.getFieldTypes(), schema.getFieldNames());

        // create socket stream iterator
        final DataType socketType = DataTypes.createTupleType(DataTypes.BOOLEAN, outputType);
        final TypeSerializer<Tuple2<Boolean, Row>> serializer = DataTypes.createExternalSerializer(socketType);

        // pass gateway port and address such that iterator knows where to bind to
        iterator = new SocketStreamIterator<>(0,
                InetAddress.getByName(RemoteInterpreterUtils.findAvailableHostAddress()), serializer);

        // create table sink
        // pass binding address and port such that sink knows where to send to
        LOGGER.debug("Collecting data at address: " + iterator.getBindAddress() + ":" + iterator.getPort());
        CollectStreamTableSink collectTableSink = new CollectStreamTableSink(iterator.getBindAddress(),
                iterator.getPort(), serializer);
        table.writeToSink(collectTableSink);
        //table.print();

        long delay = 1000L;
        long period = Long.parseLong(context.getLocalProperties().getOrDefault("refreshInterval", "3000"));

        ScheduledExecutorService refreshScheduler = Executors.newScheduledThreadPool(1);
        refreshScheduler.scheduleAtFixedRate(new RefreshTask(context), delay, period, MILLISECONDS);

        ResultRetrievalThread retrievalThread = new ResultRetrievalThread(refreshScheduler);
        retrievalThread.start();

        if (this.savePointPath == null) {
            if (this.context.getConfig().containsKey("savepointPath")) {
                this.savePointPath = this.context.getConfig().get("savepointPath").toString();
                LOGGER.info("Find savePointPath {} from paragraph config.", this.savePointPath);
            }
        }

        JobExecutionResult jobExecutionResult = null;
        if (this.savePointPath != null && Boolean
                .parseBoolean(context.getLocalProperties().getOrDefault("runWithSavePoint", "true"))) {
            LOGGER.info("Run job from savePointPath: " + savePointPath + ", parallelism: " + parallelism);
            jobExecutionResult = stEnv.execute(st);
            //TODO(zjffdu) TableEnvironment has its own entry point to submitting jobs.
            //jobExecutionResult = stEnv.execute(st, SavepointRestoreSettings.forPath(savePointPath));
        } else {
            LOGGER.info("Run job without savePointPath, " + ", parallelism: " + parallelism);
            jobExecutionResult = stEnv.execute(st);
        }
        LOGGER.info("Flink Job is finished");
        return new InterpreterResult(InterpreterResult.Code.SUCCESS);
    } catch (Exception e) {
        LOGGER.error("Fail to run stream sql job", e);
        if (e.getCause() instanceof JobCancellationException) {
            return new InterpreterResult(InterpreterResult.Code.ERROR,
                    ExceptionUtils.getStackTrace(e.getCause()));
        }
        return new InterpreterResult(InterpreterResult.Code.ERROR, ExceptionUtils.getStackTrace(e));
    }
}

From source file:org.wso2.carbon.device.mgt.etc.controlqueue.mqtt.MqttSubscriber.java

/**
 * Callback method which is triggered once the MQTT client loses its connection to the broker.
 * A scheduler thread is spawned to continuously re-attempt to connect to the broker and
 * subscribe to the device's topic. This thread runs at intervals equal to the
 * 'reConnectionInterval' of the MQTT client.
 *
 * @param throwable a Throwable Object containing the details as to why the failure occurred.
 */
@Override
public void connectionLost(Throwable throwable) {
    log.warn("Lost Connection for client: " + this.clientId + " to " + this.mqttBrokerEndPoint
            + ".\nThis was due to - " + throwable.getMessage());

    Runnable reSubscriber = new Runnable() {
        @Override
        public void run() {
            if (!isConnected()) {
                if (log.isDebugEnabled()) {
                    log.debug("Subscriber reconnecting to queue........");
                }
                try {
                    connectAndSubscribe();
                } catch (DeviceManagementException e) {
                    if (log.isDebugEnabled()) {
                        log.debug("Could not reconnect and subscribe to ControlQueue.");
                    }
                }
            }
        }
    };

    ScheduledExecutorService service = Executors.newSingleThreadScheduledExecutor();
    service.scheduleAtFixedRate(reSubscriber, 0, this.reConnectionInterval, TimeUnit.SECONDS);
}
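
One caveat with this reconnect loop: the task keeps firing at every interval even after the connection is re-established, since it merely returns when already connected and the executor is never shut down. A variation that cancels itself once reconnected could look like the sketch below (isConnected, connectAndSubscribe, reConnectionInterval and DeviceManagementException are the members from the snippet above; the self-cancelling AtomicReference holder is an assumption, not part of the original code):

final ScheduledExecutorService service = Executors.newSingleThreadScheduledExecutor();
final AtomicReference<ScheduledFuture<?>> self = new AtomicReference<>();
self.set(service.scheduleAtFixedRate(new Runnable() {
    @Override
    public void run() {
        if (isConnected()) {
            ScheduledFuture<?> handle = self.get();
            if (handle != null) {
                handle.cancel(false); // reconnected: stop polling
            }
            service.shutdown(); // also release the executor's thread
            return;
        }
        try {
            connectAndSubscribe();
        } catch (DeviceManagementException e) {
            // keep retrying on the next tick
        }
    }
}, 0, reConnectionInterval, TimeUnit.SECONDS));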

From source file:com.ms.commons.udas.impl.commons.MemcachedKeyStore.java

@SuppressWarnings("unused")
private void createThreadPool() {
    int periodInSeconds = 600; // default flush period in seconds
    String period = System.getProperty("memcached.key.flush.time");
    logger.error("memcached.key.flush.time period : " + period);
    if (period != null) {
        int temp = NumberUtils.toInt(period);
        if (temp > 0) {
            periodInSeconds = temp;
        }
    }
    logger.error("Used: memcached.key.flush.time " + periodInSenconds + " Sendconds");

    ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
    executor.scheduleAtFixedRate(new Runnable() {

        @Override
        public void run() {
            flushKeyIfNecesery();
        }
    }, 1, periodInSeconds, TimeUnit.SECONDS);
}

From source file:outfox.dict.contest.service.CrontabService.java

@PostConstruct
public void init() {
    ScheduledExecutorService executor = Executors.newScheduledThreadPool(ContestConsts.CORE_POOL_SIZE);
    executor.scheduleAtFixedRate(new SingerInfoUpdate(), 0, ContestConsts.CRONTAB_TIME, TimeUnit.DAYS);
    // loadingPreparedBarrage();
}

From source file:io.soabase.sql.attributes.SqlDynamicAttributesFactory.java

@Override
public SoaDynamicAttributes build(Environment environment, List<String> scopes) {
    SqlSession sqlSession = SoaBundle.getFeatures(environment).getNamedRequired(SqlSession.class, sessionName);

    final SqlDynamicAttributes dynamicAttributes = new SqlDynamicAttributes(sqlSession, scopes);
    ScheduledExecutorService service = environment.lifecycle()
            .scheduledExecutorService("SoaDynamicAttributes-%d", true).build();
    Runnable command = new Runnable() {
        @Override
        public void run() {
            dynamicAttributes.update();
        }
    };
    service.scheduleAtFixedRate(command, refreshPeriodSeconds, refreshPeriodSeconds, TimeUnit.SECONDS);
    return dynamicAttributes;
}

From source file:com.eviware.loadui.impl.conversion.ReferenceToFileConverter.java

public ReferenceToFileConverter(AddressableRegistry addressableRegistry,
        ScheduledExecutorService executorService) {
    addressableRegistry.addEventListener(CollectionEvent.class, this);
    executorService.scheduleAtFixedRate(new RemoveOldFilesTask(), 5, 5, TimeUnit.MINUTES);

    if (!storage.isDirectory() && !storage.mkdirs()) {
        throw new RuntimeException("Unable to create path: " + storage.getAbsolutePath());
    }
}