Example usage for java.util.concurrent Executors newScheduledThreadPool

Introduction

This page collects example usages of java.util.concurrent.Executors.newScheduledThreadPool from open-source projects.

Prototype

public static ScheduledExecutorService newScheduledThreadPool(int corePoolSize) 

Document

Creates a thread pool that can schedule commands to run after a given delay, or to execute periodically.
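
Before the project examples, a minimal, self-contained sketch of the typical lifecycle (the class name, task, and timings are illustrative): create the pool, schedule a periodic task, and shut the pool down when done, since the pool's threads are non-daemon and would otherwise keep the JVM alive.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class HeartbeatDemo {
    public static void main(String[] args) throws InterruptedException {
        // One thread is enough for a single lightweight periodic task.
        ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);

        // First run after 1 second, then every 2 seconds.
        scheduler.scheduleAtFixedRate(
                () -> System.out.println("heartbeat at " + System.currentTimeMillis()),
                1, 2, TimeUnit.SECONDS);

        Thread.sleep(7000);

        // Stop accepting new runs, then wait for any in-flight run to finish.
        scheduler.shutdown();
        scheduler.awaitTermination(5, TimeUnit.SECONDS);
    }
}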

Usage

From source file:com.janrain.backplane2.server.config.Backplane2Config.java

private ScheduledExecutorService createPingTask() {
    ScheduledExecutorService ping = Executors.newScheduledThreadPool(1);
    ping.scheduleWithFixedDelay(new Runnable() {
        @Override
        public void run() {
            com.janrain.redis.Redis.getInstance().ping();
        }
    }, 30, 10, TimeUnit.SECONDS);
    return ping;
}
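
Here scheduleWithFixedDelay waits 30 seconds before the first ping, then leaves 10 seconds between the end of one ping and the start of the next, so a slow Redis round-trip delays the following ping instead of letting runs pile up.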

From source file:org.osgp.adapter.protocol.dlms.application.config.DlmsConfig.java

@Bean
public ScheduledExecutorService scheduledExecutorService(
        @Value("${executor.scheduled.poolsize}") final int poolsize) {
    return Executors.newScheduledThreadPool(poolsize);
}
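
Reading the pool size from ${executor.scheduled.poolsize} via @Value lets operators tune it per deployment without a rebuild. Spring's default destroy-method inference should also call shutdown() on this bean when the context closes, since ScheduledExecutorService exposes a public no-arg shutdown() method.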

From source file:org.opendaylight.groupbasedpolicy.renderer.opflex.lib.OpflexConnectionServiceTest.java

@Before
public void setUp() throws Exception {
    MockitoAnnotations.initMocks(this);

    int numCPU = Runtime.getRuntime().availableProcessors();
    executor = Executors.newScheduledThreadPool(numCPU * 2);

    /*
     * Mocks
     */
    when(mockDataBroker.newReadOnlyTransaction()).thenReturn(mockRead);
    when(mockDataBroker.newWriteOnlyTransaction()).thenReturn(mockWrite);
    when(mockWrite.submit()).thenReturn(mockStatus);
    when(mockRead.read(LogicalDatastoreType.CONFIGURATION, OpflexConnectionService.DISCOVERY_IID))
            .thenReturn(mockOption);
    when(mockOption.get()).thenReturn(mockDao);
    when(mockDao.get()).thenReturn(dummyDefinitions);

    /*
     * Builders for creating our own discovery definitions
     */
    discoveryBuilder = new DiscoveryDefinitionsBuilder();
    eprBuilder = new EndpointRegistryBuilder();
    prBuilder = new PolicyRepositoryBuilder();
    oBuilder = new ObserverBuilder();

    int testPort = getAvailableServerPort();
    assertTrue("No available server port was found", testPort != 0);
    System.setProperty(OpflexConnectionService.OPFLEX_LISTENPORT, Integer.toString(testPort));
    System.setProperty(OpflexConnectionService.OPFLEX_LISTENIP, TEST_IP);
}
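
Sizing the pool at twice the number of available processors gives the connection tests headroom for many concurrent timers; for the simple periodic housekeeping in most of the other examples, a single thread suffices.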

From source file:com.att.nsa.cambria.backends.kafka.KafkaConsumerCache.java

/**
 * Creates a KafkaConsumerCache object. Before it is used, you must call
 * startCache().
 *
 * @param apiId   the API node ID; must not be null
 * @param metrics the metrics set used by the cache
 */
public KafkaConsumerCache(String apiId, MetricsSet metrics) {

    if (apiId == null) {
        throw new IllegalArgumentException("API Node ID must be specified.");
    }

    fApiId = apiId;
    fMetrics = metrics;
    String strkSetting_ZkBasePath = AJSCPropertiesMap.getProperty(CambriaConstants.msgRtr_prop,
            kSetting_ZkBasePath);
    if (null == strkSetting_ZkBasePath)
        strkSetting_ZkBasePath = kDefault_ZkBasePath;
    fBaseZkPath = strkSetting_ZkBasePath;

    fConsumers = new ConcurrentHashMap<String, KafkaConsumer>();
    fSweepScheduler = Executors.newScheduledThreadPool(1);

    curatorConsumerCache = null;

    status = Status.NOT_STARTED;

    listener = new ConnectionStateListener() {
        public void stateChanged(CuratorFramework client, ConnectionState newState) {
            if (newState == ConnectionState.LOST) {
                log.info("ZooKeeper connection expired");
                handleConnectionLoss();
            } else if (newState == ConnectionState.READ_ONLY) {
                log.warn("ZooKeeper connection set to read only mode.");
            } else if (newState == ConnectionState.RECONNECTED) {
                log.info("ZooKeeper connection re-established");
                handleReconnection();
            } else if (newState == ConnectionState.SUSPENDED) {
                log.warn("ZooKeeper connection has been suspended.");
                handleConnectionSuspended();
            }
        }
    };
}
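
A single-threaded pool is enough for fSweepScheduler because the periodic sweep is the only task it ever runs; the ConnectionStateListener reacts to Curator/ZooKeeper connection-state changes independently of that scheduler.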

From source file:com.btoddb.chronicle.plunkers.HdfsPlunkerImplIT.java

@Test
@Ignore("very flakey, need to work out a more stable way of testing")
public void testLongRun() throws Exception {
    plunker.setIdleTimeout(0);
    plunker.setRollPeriod(2);
    plunker.setTimeoutCheckPeriod(100);
    plunker.init(config);

    final int sleep = 200;
    final int maxCount = 100; // 20 seconds at 'sleep' interval should be 10 files
    final AtomicInteger count = new AtomicInteger();

    // do this to prime HDFS FileSystem object - otherwise timing is off
    plunker.handleInternal(Arrays.asList(new Event("the-body").withHeader("customer", "cust")
            .withHeader("msgId", String.valueOf(count.getAndIncrement()))));

    ScheduledExecutorService executor = Executors.newScheduledThreadPool(1);
    System.out.println("start");
    executor.scheduleWithFixedDelay(new Runnable() {
        @Override
        public void run() {
            try {
                System.out.println("time = " + System.currentTimeMillis());
                plunker.handleInternal(Arrays.asList(new Event("the-body").withHeader("customer", "cust")
                        .withHeader("msgId", String.valueOf(count.get()))));
            } catch (Exception e) {
                e.printStackTrace();
            }

            count.incrementAndGet();
        }
    }, 0, sleep, TimeUnit.MILLISECONDS);

    while (count.get() < maxCount) {
        Thread.sleep(sleep / 2);
    }

    executor.shutdown();
    executor.awaitTermination(60, TimeUnit.SECONDS);

    Thread.sleep(1500);

    plunker.shutdown();

    Event[] events = new Event[count.get()];
    for (int i = 0; i < count.get(); i++) {
        events[i] = new Event("the-body").withHeader("customer", "cust").withHeader("msgId", String.valueOf(i));
    }

    File theDir = new File(String.format("%s/the/cust/path", baseDir.getPath()));

    assertThat(theDir, ftUtils.countWithSuffix(".tmp", 0));
    assertThat(theDir, ftUtils.countWithSuffix(".avro", 10));

    assertThat(theDir, ftUtils.hasEventsInDir(events));
}
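
Note the shutdown ordering: shutdown() stops further scheduled runs, and awaitTermination() blocks until any in-flight run completes, so the count read afterwards to build the expected events array is stable.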

From source file:it.polimi.tower4clouds.manager.MonitoringManager.java

public MonitoringManager(ManagerConfig config) throws Exception {
    this.config = config;
    validator = new RulesValidator();

    RspKbAPI.KB_NAME_SPACE = MO.URI;
    RspKbAPI.KB_NS_PREFIX = MO.prefix;

    knowledgeBase = new RspKbAPI(config.getDaUrl());
    dataAnalyzer = new RSP_services_csparql_API(config.getDaUrl());

    logger.info("Checking if Data Analyzer is reachable");
    NetUtil.waitForResponseCode(config.getDaUrl() + "/queries", 200, MAX_KEEP_ALIVE, 5000);
    logger.info("Resetting KB");
    knowledgeBase.clearAll();
    logger.info("Resetting DA");
    resetDA();

    registeredDCs = new ConcurrentHashMap<String, DCDescriptor>();
    dcsKeepAlive = new ConcurrentHashMap<String, Integer>();
    dcsKATimestamp = new ConcurrentHashMap<String, Long>();
    registeredResources = new ConcurrentHashMap<String, Resource>();
    resourcesKeepAlive = new ConcurrentHashMap<String, Integer>();
    resourcesKATimestamp = new ConcurrentHashMap<String, Long>();

    queryFactory = new QueryFactory();

    actionsExecutor = Executors.newFixedThreadPool(1);
    keepAliveScheduler = Executors.newScheduledThreadPool(1);
    keepAliveScheduler.scheduleAtFixedRate(new keepAliveChecker(), 0, keepAliveCheckPeriod, TimeUnit.SECONDS);

    logger.info("Uploading ontology to KB");
    knowledgeBase.putModel(MO.model, ManagerConfig.MODEL_GRAPH_NAME);

    if (config.getRdfHistoryDbIP() != null) {
        rdfHistoryDB = new RdfHistoryDBAPI(config.getRdfHistoryDbIP(), config.getRdfHistoryDbPort());
        rdfHistoryDB.setAsync(true);
    }
    // initSelfMonitoring();
}
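
This constructor uses scheduleAtFixedRate for the keep-alive check, while several of the earlier examples used scheduleWithFixedDelay. The difference only shows when a run takes a noticeable fraction of the period; a minimal sketch (the checker Runnable and the 60-second period are illustrative):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class RateVsDelay {
    public static void main(String[] args) {
        ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(2);
        Runnable checker = () -> System.out.println("check at " + System.currentTimeMillis());

        // Fixed rate: the 60s period is measured between run *start* times.
        // An overrunning task makes the next run start immediately afterwards,
        // but runs of the same task never overlap.
        scheduler.scheduleAtFixedRate(checker, 0, 60, TimeUnit.SECONDS);

        // Fixed delay: 60s is measured from the *end* of one run to the start
        // of the next, so a slow run pushes the whole schedule back.
        scheduler.scheduleWithFixedDelay(checker, 0, 60, TimeUnit.SECONDS);

        // Sketch only; a real program would eventually call scheduler.shutdown().
    }
}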

From source file:info.raack.appliancelabeler.service.DefaultDataService.java

@PostConstruct
public void init() throws JAXBException, SAXException {
    lastDatapoints = new HashMap<String, Long>();

    // initialize email reloader daily
    scheduler = Executors.newScheduledThreadPool(1);
    scheduler.scheduleWithFixedDelay(new ReloadUserEmailRunnable(), 0, 5, TimeUnit.MINUTES);
}
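
newScheduledThreadPool creates non-daemon threads by default, so a scheduler built in a @PostConstruct method like this keeps the JVM alive until something shuts it down. When that is undesirable, the two-argument overload accepts a ThreadFactory; a minimal sketch with illustrative names:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;

public class DaemonSchedulerSketch {
    public static void main(String[] args) throws InterruptedException {
        // Daemon threads let the JVM exit even if shutdown() is never called.
        ThreadFactory daemonFactory = runnable -> {
            Thread t = new Thread(runnable, "email-reloader");
            t.setDaemon(true);
            return t;
        };
        ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1, daemonFactory);
        scheduler.scheduleWithFixedDelay(
                () -> System.out.println("reloading"), 0, 5, TimeUnit.MINUTES);

        Thread.sleep(1000); // give the first run a chance before the JVM exits
    }
}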

From source file:org.commonreality.sensors.base.BaseSensor.java

@Override
public void start() throws Exception {
    try {
        _realtimeClock = new RealtimeClock(getCommonReality(), Executors.newScheduledThreadPool(1));
        super.start();
        if (LOGGER.isDebugEnabled())
            LOGGER.debug("Executing committer");
        execute(_committer);
    } catch (Exception e) {
        LOGGER.error("Failed to start properly", e);
        throw e;
    }
}
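
Passing a fresh single-thread scheduler into the RealtimeClock keeps clock ticks isolated from the sensor's other work, and rethrowing after the error log means the caller still observes the startup failure.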

From source file:com.vmware.photon.controller.common.xenon.XenonRestClientTest.java

private BasicServiceHost[] setUpMultipleHosts(Integer hostCount) throws Throwable {

    BasicServiceHost[] hosts = new BasicServiceHost[hostCount];
    InetSocketAddress[] servers = new InetSocketAddress[hostCount];
    for (int i = 0; i < hostCount; i++) {
        hosts[i] = BasicServiceHost.create();
        hosts[i].setMaintenanceIntervalMicros(TimeUnit.MILLISECONDS.toMicros(100));
        hosts[i].startServiceSynchronously(ExampleService.createFactory(), null, ExampleService.FACTORY_LINK);

        servers[i] = new InetSocketAddress(hosts[i].getPreferredAddress(), hosts[i].getPort());
    }

    if (hostCount > 1) {
        // join peer node group
        BasicServiceHost host = hosts[0];
        for (int i = 1; i < hosts.length; i++) {
            BasicServiceHost peerHost = hosts[i];
            ServiceHostUtils.joinNodeGroup(peerHost, host.getUri().getHost(), host.getPort());
        }

        ServiceHostUtils.waitForNodeGroupConvergence(hosts,
                com.vmware.xenon.services.common.ServiceUriPaths.DEFAULT_NODE_GROUP,
                ServiceHostUtils.DEFAULT_NODE_GROUP_CONVERGENCE_MAX_RETRIES,
                ServiceHostUtils.DEFAULT_NODE_GROUP_CONVERGENCE_SLEEP);
    }

    StaticServerSet serverSet = new StaticServerSet(servers);
    xenonRestClient = spy(new XenonRestClient(serverSet, Executors.newFixedThreadPool(1),
            Executors.newScheduledThreadPool(1)));
    xenonRestClient.start();
    return hosts;
}
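
Here the rest client receives two executors: a fixed pool for request work and a scheduled pool, presumably for timeouts and retries; wrapping the client in Mockito's spy() lets the test verify interactions while keeping the real behavior.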

From source file:org.apache.apex.malhar.lib.db.jdbc.AbstractJdbcPollInputOperator.java

@Override
public void setup(OperatorContext context) {
    super.setup(context);
    dslContext = createDSLContext();
    if (scanService == null) {
        scanService = Executors.newScheduledThreadPool(1);
    }
    execute = true;
    emitQueue = new LinkedBlockingQueue<>(queueCapacity);
    windowManager.setup(context);
}
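
The null check makes the scheduler injectable: a test can assign its own scanService before setup() runs, and only when nothing was injected does the operator fall back to a fresh single-threaded pool.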