Example usage for java.util.concurrent Executors newScheduledThreadPool

Introduction

This page collects usage examples for java.util.concurrent Executors.newScheduledThreadPool, taken from open source projects.

Prototype

public static ScheduledExecutorService newScheduledThreadPool(int corePoolSize) 

Document

Creates a thread pool that can schedule commands to run after a given delay, or to execute periodically.
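A minimal, self-contained sketch (not taken from any of the projects below) showing the two core scheduling modes and an orderly shutdown:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ScheduledPoolDemo {
    public static void main(String[] args) throws InterruptedException {
        // Two core threads; the pool reuses idle threads for later tasks.
        ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(2);

        // Run once, after a one-second delay.
        scheduler.schedule(() -> System.out.println("delayed task"), 1, TimeUnit.SECONDS);

        // Run every 500 ms after an initial one-second delay.
        scheduler.scheduleAtFixedRate(() -> System.out.println("periodic task"), 1000, 500,
                TimeUnit.MILLISECONDS);

        Thread.sleep(3000);
        // The pool's threads are non-daemon; shut down or the JVM stays alive.
        scheduler.shutdown();
    }
}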

Usage

From source file:com.ctriposs.rest4j.server.Rest4JServlet.java

private AbstractR2Servlet buildR2ServletFromServletParams(ServletConfig servletConfig) {
    ResourceFactory resourceFactory = new PrototypeResourceFactory();

    Rest4JConfig config = new Rest4JConfig();
    config.setResourcePackageNamesSet(getResourcePackageSet(servletConfig));

    final ScheduledExecutorService scheduler = Executors
            .newScheduledThreadPool(getParseqThreadPoolSize(servletConfig));
    Engine engine = new EngineBuilder().setTaskExecutor(scheduler).setTimerScheduler(scheduler).build();

    DelegatingTransportDispatcher dispatcher = new DelegatingTransportDispatcher(
            new Rest4JServer(config, resourceFactory, engine));

    boolean useAsync = getUseAsync(servletConfig);
    long asyncTimeOut = getAsyncTimeout(servletConfig);

    if (useAsync && servletConfig.getServletContext().getMajorVersion() < 3) {
        throw new IllegalArgumentException(
                "This servlet is configured with useAsync=true, but the current servlet "
                        + "context does not support the required Servlet API 3.0.");

    }
    if (!useAsync) {
        log.info(
                "Initializing Rest4J with a thread based request handling.  Set useAsync=true on a Servlet API 3.0 container to enable Rest4J's async servlet.");
        return new RAPServlet(dispatcher);
    } else {
        log.info("Initializing Rest4J with an async request handling enabled.");
        return new AsyncR2Servlet(dispatcher, asyncTimeOut);
    }
}

From source file:com.vmware.photon.controller.common.xenon.XenonRestClientTest.java

private void setUpHostAndClient() throws Throwable {
    host = BasicServiceHost.create();
    host.startServiceSynchronously(ExampleService.createFactory(), null, ExampleService.FACTORY_LINK);
    assertThat(host.checkServiceAvailable(ExampleService.FACTORY_LINK), is(true));

    StaticServerSet serverSet = new StaticServerSet(
            new InetSocketAddress(host.getPreferredAddress(), host.getPort()));

    xenonRestClient = spy(new XenonRestClient(serverSet, Executors.newFixedThreadPool(1),
            Executors.newScheduledThreadPool(1)));
}

From source file:com.netflix.conductor.dao.index.ElasticSearchDAO.java

@Inject
public ElasticSearchDAO(Client client, Configuration config, ObjectMapper om) {
    this.om = om;
    this.client = client;
    this.indexName = config.getProperty("workflow.elasticsearch.index.name", null);

    try {

        initIndex();
        updateIndexName(config);
        Executors.newScheduledThreadPool(1).scheduleAtFixedRate(() -> updateIndexName(config), 0, 1,
                TimeUnit.HOURS);

    } catch (Exception e) {
        log.error(e.getMessage(), e);
    }
}

From source file:com.netflix.conductor.dao.es5.index.ElasticSearch5DAO.java

@Inject
public ElasticSearch5DAO(Client client, Configuration config, ObjectMapper om) {
    this.om = om;
    this.client = client;
    this.indexName = config.getProperty("workflow.elasticsearch.index.name", null);

    try {

        initIndex();
        updateIndexName(config);
        Executors.newScheduledThreadPool(1).scheduleAtFixedRate(() -> updateIndexName(config), 0, 1,
                TimeUnit.HOURS);

    } catch (Exception e) {
        log.error(e.getMessage(), e);
    }
}
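A pattern worth flagging in the two Conductor constructors above: the single-thread scheduler is created inline and its reference discarded, so it can never be shut down, and its non-daemon thread lives for the rest of the process. A hedged sketch of the more conventional shape (class and method names here are hypothetical):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class IndexNameRefresher {
    // Keep the reference so the pool can be stopped at shutdown.
    private final ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);

    public void start(Runnable refreshTask) {
        // Refresh immediately, then once per hour, mirroring the DAOs above.
        scheduler.scheduleAtFixedRate(refreshTask, 0, 1, TimeUnit.HOURS);
    }

    public void stop() {
        scheduler.shutdown();
    }
}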

From source file:com.woopra.tracking.android.WoopraTracker.java

public void setPingEnabled(boolean enabled) {
    this.pingEnabled = enabled;
    if (enabled) {
        if (pingScheduler == null) {
            long interval = idleTimeoutMs - 5000L;
            if (interval < 0) {
                // Halving a negative value keeps it negative, which would make
                // scheduleAtFixedRate reject the period; fall back to half the timeout.
                interval = idleTimeoutMs / 2;
            }
            pingScheduler = Executors.newScheduledThreadPool(1);
            pingScheduler.scheduleAtFixedRate(new Runnable() {
                @Override
                public void run() {
                    try {
                        WoopraPing ping = new WoopraPing(domain, getVisitor().getCookie(), clientInfo,
                                idleTimeoutMs);
                        ping.ping();
                    } catch (Throwable t) {
                        Log.e(TAG, "unknown ping error", t);
                    }
                }

            }, interval, interval, TimeUnit.MILLISECONDS);
        }
    } else {
        if (pingScheduler != null) {
            pingScheduler.shutdown();
            pingScheduler = null;
        }
    }
}
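The catch-all Throwable handler above is not defensive boilerplate: scheduleAtFixedRate suppresses all subsequent executions of a periodic task once a run throws. A minimal sketch of the failure mode (prints "tick" only once):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class FixedRateFailureDemo {
    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
        // The first run throws, so the task is never rescheduled.
        scheduler.scheduleAtFixedRate(() -> {
            System.out.println("tick");
            throw new RuntimeException("boom");
        }, 0, 100, TimeUnit.MILLISECONDS);

        Thread.sleep(500);
        scheduler.shutdown();
    }
}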

From source file:com.nts.alphamale.handler.ExecutorHandler.java

public void executePeriodically(List<Runnable> rt, long period) {
    multipleScheduledExecutor = Executors.newScheduledThreadPool(rt.size());
    for (Runnable r : rt) {
        multipleScheduledExecutor.scheduleAtFixedRate(r, 3 * 1000, period, TimeUnit.MILLISECONDS);
    }
}

From source file:org.apache.hama.MiniBSPCluster.java

public MiniBSPCluster(HamaConfiguration conf, int groomServers) {
    this.configuration = conf;
    this.grooms = groomServers;
    if (1 > this.grooms) {
        this.grooms = 2;
    }
    LOG.info("Groom server number " + this.grooms);
    int threadpool = conf.getInt("bsp.test.threadpool", 10);
    LOG.info("Thread pool value " + threadpool);
    scheduler = Executors.newScheduledThreadPool(threadpool);
}

From source file:org.apache.zeppelin.flink.sql.AbstractStreamSqlJob.java

public InterpreterResult run(String st) {
    try {
        checkLocalProperties(context.getLocalProperties());

        int parallelism = Integer
                .parseInt(context.getLocalProperties().getOrDefault("parallelism", defaultParallelism + ""));
        this.stEnv.getConfig().getConf().setInteger(TableConfigOptions.SQL_RESOURCE_DEFAULT_PARALLELISM,
                parallelism);

        Table table = stEnv.sqlQuery(st);
        this.schema = removeTimeAttributes(table.getSchema());
        checkTableSchema(schema);

        LOGGER.info("ResultTable Schema: " + this.schema);
        final DataType outputType = DataTypes.createRowTypeV2(schema.getFieldTypes(), schema.getFieldNames());

        // create socket stream iterator
        final DataType socketType = DataTypes.createTupleType(DataTypes.BOOLEAN, outputType);
        final TypeSerializer<Tuple2<Boolean, Row>> serializer = DataTypes.createExternalSerializer(socketType);

        // pass gateway port and address such that iterator knows where to bind to
        iterator = new SocketStreamIterator<>(0,
                InetAddress.getByName(RemoteInterpreterUtils.findAvailableHostAddress()), serializer);

        // create table sink
        // pass binding address and port such that sink knows where to send to
        LOGGER.debug("Collecting data at address: " + iterator.getBindAddress() + ":" + iterator.getPort());
        CollectStreamTableSink collectTableSink = new CollectStreamTableSink(iterator.getBindAddress(),
                iterator.getPort(), serializer);
        table.writeToSink(collectTableSink);
        //table.print();

        long delay = 1000L;
        long period = Long.parseLong(context.getLocalProperties().getOrDefault("refreshInterval", "3000"));

        ScheduledExecutorService refreshScheduler = Executors.newScheduledThreadPool(1);
        refreshScheduler.scheduleAtFixedRate(new RefreshTask(context), delay, period, MILLISECONDS);

        ResultRetrievalThread retrievalThread = new ResultRetrievalThread(refreshScheduler);
        retrievalThread.start();

        if (this.savePointPath == null) {
            if (this.context.getConfig().containsKey("savepointPath")) {
                this.savePointPath = this.context.getConfig().get("savepointPath").toString();
                LOGGER.info("Find savePointPath {} from paragraph config.", this.savePointPath);
            }
        }

        JobExecutionResult jobExecutionResult = null;
        if (this.savePointPath != null && Boolean
                .parseBoolean(context.getLocalProperties().getOrDefault("runWithSavePoint", "true"))) {
            LOGGER.info("Run job from savePointPath: " + savePointPath + ", parallelism: " + parallelism);
            jobExecutionResult = stEnv.execute(st);
            //TODO(zjffdu) TableEnvironment has its own entry point to submitting jobs.
            //jobExecutionResult = stEnv.execute(st, SavepointRestoreSettings.forPath(savePointPath));
        } else {
            LOGGER.info("Run job without savePointPath, " + ", parallelism: " + parallelism);
            jobExecutionResult = stEnv.execute(st);
        }
        LOGGER.info("Flink Job is finished");
        return new InterpreterResult(InterpreterResult.Code.SUCCESS);
    } catch (Exception e) {
        LOGGER.error("Fail to run stream sql job", e);
        if (e.getCause() instanceof JobCancellationException) {
            return new InterpreterResult(InterpreterResult.Code.ERROR,
                    ExceptionUtils.getStackTrace(e.getCause()));
        }
        return new InterpreterResult(InterpreterResult.Code.ERROR, ExceptionUtils.getStackTrace(e));
    }
}

From source file:com.baifendian.swordfish.masterserver.master.MasterServiceImpl.java

public MasterServiceImpl() {
    this.flowDao = DaoFactory.getDaoInstance(FlowDao.class);
    this.adHocDao = DaoFactory.getDaoInstance(AdHocDao.class);
    this.streamingDao = DaoFactory.getDaoInstance(StreamingDao.class);

    this.executorServerManager = new ExecutorServerManager();
    this.executionFlowQueue = new LinkedBlockingQueue<>(MasterConfig.executionFlowQueueSize);
    this.checkService = Executors.newScheduledThreadPool(5);
}

From source file:org.apache.hadoop.hive.serde2.objectinspector.TestReflectionObjectInspectors.java

public void testObjectInspectorThreadSafety() throws InterruptedException {
    final int workerCount = 5; // 5 workers to run getReflectionObjectInspector concurrently
    final ScheduledExecutorService executorService = Executors.newScheduledThreadPool(workerCount);
    final MutableObject exception = new MutableObject();
    Thread runner = new Thread(new Runnable() {
        @Override
        @SuppressWarnings("unchecked")
        public void run() {
            Future<ObjectInspector>[] results = (Future<ObjectInspector>[]) new Future[workerCount];
            ObjectPair<Type, ObjectInspectorFactory.ObjectInspectorOptions>[] types = (ObjectPair<Type, ObjectInspectorFactory.ObjectInspectorOptions>[]) new ObjectPair[] {
                    new ObjectPair<Type, ObjectInspectorFactory.ObjectInspectorOptions>(Complex.class,
                            ObjectInspectorFactory.ObjectInspectorOptions.THRIFT),
                    new ObjectPair<Type, ObjectInspectorFactory.ObjectInspectorOptions>(MyStruct.class,
                            ObjectInspectorFactory.ObjectInspectorOptions.JAVA), };
            try {
                for (int i = 0; i < 20; i++) { // repeat 20 times
                    for (final ObjectPair<Type, ObjectInspectorFactory.ObjectInspectorOptions> t : types) {
                        ObjectInspectorFactory.objectInspectorCache.clear();
                        for (int k = 0; k < workerCount; k++) {
                            results[k] = executorService.schedule(new Callable<ObjectInspector>() {
                                @Override
                                public ObjectInspector call() throws Exception {
                                    return ObjectInspectorFactory.getReflectionObjectInspector(t.getFirst(),
                                            t.getSecond());
                                }
                            }, 50, TimeUnit.MILLISECONDS);
                        }
                        ObjectInspector oi = results[0].get();
                        for (int k = 1; k < workerCount; k++) {
                            assertEquals(oi, results[k].get());
                        }
                    }
                }
            } catch (Throwable e) {
                exception.setValue(e);
            }
        }
    });
    try {
        runner.start();
        long endTime = System.currentTimeMillis() + 300000; // timeout in 5 minutes
        while (runner.isAlive()) {
            if (System.currentTimeMillis() > endTime) {
                runner.interrupt(); // Interrupt the runner thread
                fail("Timed out waiting for the runner to finish");
            }
            runner.join(10000);
        }
        if (exception.getValue() != null) {
            fail("Got exception: " + exception.getValue());
        }
    } finally {
        executorService.shutdownNow();
    }
}