Example usage for java.util.concurrent Executors newScheduledThreadPool

Introduction

This page lists example usages of java.util.concurrent Executors.newScheduledThreadPool.

Prototype

public static ScheduledExecutorService newScheduledThreadPool(int corePoolSize, ThreadFactory threadFactory) 

Document

Creates a thread pool that can schedule commands to run after a given delay, or to execute periodically.
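
Before the real-world excerpts below, here is a minimal, self-contained sketch of how this overload is commonly used. The pool size, thread name, and tasks are illustrative assumptions, not taken from any of the examples on this page.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;

public class ScheduledPoolDemo {
    public static void main(String[] args) throws InterruptedException {
        // Illustrative factory: names the pool's threads and marks them as
        // daemons so the pool does not keep the JVM alive on its own.
        ThreadFactory factory = r -> {
            Thread t = new Thread(r, "demo-scheduler");
            t.setDaemon(true);
            return t;
        };

        ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(2, factory);

        // Run once after a one-second delay.
        scheduler.schedule(() -> System.out.println("delayed task"), 1, TimeUnit.SECONDS);

        // Run repeatedly: first immediately, then two seconds after each run completes.
        scheduler.scheduleWithFixedDelay(() -> System.out.println("periodic task"), 0, 2,
                TimeUnit.SECONDS);

        Thread.sleep(5000);
        scheduler.shutdown();
    }
}

Passing a ThreadFactory, rather than relying on the default one, is the pattern nearly every excerpt below follows: it lets the caller name the threads (useful in thread dumps) and set the daemon flag or priority.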

Usage

From source file:org.apache.nifi.provenance.MiNiFiPersistentProvenanceRepository.java

public MiNiFiPersistentProvenanceRepository(final RepositoryConfiguration configuration,
        final int rolloverCheckMillis) throws IOException {
    if (configuration.getStorageDirectories().isEmpty()) {
        throw new IllegalArgumentException("Must specify at least one storage directory");
    }

    this.configuration = configuration;
    this.maxAttributeChars = configuration.getMaxAttributeChars();

    for (final File file : configuration.getStorageDirectories()) {
        final Path storageDirectory = file.toPath();
        final Path journalDirectory = storageDirectory.resolve("journals");

        if (!Files.exists(journalDirectory)) {
            Files.createDirectories(journalDirectory);
        } else if (!Files.isDirectory(journalDirectory)) {
            throw new IllegalArgumentException("Storage Location " + journalDirectory + " is not a directory");
        }
    }

    this.maxPartitionMillis = configuration.getMaxEventFileLife(TimeUnit.MILLISECONDS);
    this.maxPartitionBytes = configuration.getMaxEventFileCapacity();
    this.alwaysSync = configuration.isAlwaysSync();
    this.rolloverCheckMillis = rolloverCheckMillis;

    scheduledExecService = Executors.newScheduledThreadPool(3,
            new NamedThreadFactory("Provenance Maintenance Thread"));

    // The number of rollover threads is a little bit arbitrary but comes from the idea that multiple storage directories generally
    // live on separate physical partitions. As a result, we want to use at least one thread per partition in order to utilize the
    // disks efficiently. However, the rollover actions can be somewhat CPU intensive, so we double the number of threads in order
    // to account for that.
    final int numRolloverThreads = configuration.getStorageDirectories().size() * 2;
    rolloverExecutor = Executors.newScheduledThreadPool(numRolloverThreads,
            new NamedThreadFactory("Provenance Repository Rollover Thread"));
}

From source file:org.apache.hadoop.hive.metastore.HiveClientCache.java

private ScheduledFuture<?> createCleanupThread(long interval) {
    // Add a maintenance thread that will attempt to trigger a cache clean continuously
    Runnable cleanupThread = new Runnable() {
        @Override
        public void run() {
            cleanup();
        }
    };

    /**
     * Create the cleanup handle. In addition to cleaning up every cleanupInterval, we add
     * a slight offset so that the very first run happens with a small delay, in order to
     * catch any other connections that were closed when the first timeout happened.
     * As a result, an unused connection can be expected to be reaped 5 seconds after the
     * first timeout; after that, it is checked every
     * max(DEFAULT_HIVE_CACHE_EXPIRY_TIME_SECONDS, timeout) seconds.
     */
    ThreadFactory daemonThreadFactory = (new ThreadFactoryBuilder()).setDaemon(true)
            .setNameFormat("HiveClientCache-cleaner-%d").build();

    return Executors.newScheduledThreadPool(1, daemonThreadFactory).scheduleWithFixedDelay(cleanupThread,
            timeout + 5, interval, TimeUnit.SECONDS);
}

From source file:org.energy_home.jemma.internal.utils.thread.ExecutorManager.java

private void start() {
    log.info("ExecutorManager starting...");
    numberOfOrderedTasks = 0;
    scheduler = Executors.newScheduledThreadPool(1, new SchedulerThreadFactory());
    canceller = Executors.newScheduledThreadPool(1, new CancellerThreadFactory());
    nearRealTimeOrderedTask = new Runnable() {
        public void run() {
            try {
                UserTasks[] tasksArray = null;
                synchronized (nearRealTimeOrderedTasksMap) {
                    if (nearRealTimeOrderedTasksMap.size() > 0) {
                        tasksArray = new UserTasks[nearRealTimeOrderedTasksMap.size()];
                        nearRealTimeOrderedTasksMap.values().toArray(tasksArray);
                    }
                }
                if (tasksArray != null) {
                    for (int i = 0; i < tasksArray.length; i++) {
                        tasksArray[i].run();
                    }
                }
                synchronized (nearRealTimeOrderedTasksMap) {
                    if (numberOfOrderedTasks == 0)
                        nearRealTimeMainTaskFuture.cancel(true);
                }
            } catch (Exception e) {
                log.error("nearRealTimeTask run error", e);
            }
        }
    };
}

From source file:org.apache.flink.mesos.runtime.clusterframework.MesosApplicationMasterRunner.java

/**
 * The main work method, must run as a privileged action.
 *
 * @return The return code for the Java process.
 */
protected int runPrivileged(Configuration config, Configuration dynamicProperties) {

    ActorSystem actorSystem = null;
    WebMonitor webMonitor = null;
    MesosArtifactServer artifactServer = null;
    ScheduledExecutorService futureExecutor = null;
    ExecutorService ioExecutor = null;
    MesosServices mesosServices = null;

    try {
        // ------- (1) load and parse / validate all configurations -------

        // Note that we use the "appMasterHostname" given by the system to make sure
        // we use hostnames consistently throughout Akka;
        // to Akka, "localhost" and "localhost.localdomain" are different actors.
        final String appMasterHostname = InetAddress.getLocalHost().getHostName();

        // Mesos configuration
        final MesosConfiguration mesosConfig = createMesosConfig(config, appMasterHostname);

        // JM configuration
        int numberProcessors = Hardware.getNumberCPUCores();

        futureExecutor = Executors.newScheduledThreadPool(numberProcessors,
                new ExecutorThreadFactory("mesos-jobmanager-future"));

        ioExecutor = Executors.newFixedThreadPool(numberProcessors,
                new ExecutorThreadFactory("mesos-jobmanager-io"));

        mesosServices = MesosServicesUtils.createMesosServices(config);

        // TM configuration
        final MesosTaskManagerParameters taskManagerParameters = MesosTaskManagerParameters.create(config);

        LOG.info("TaskManagers will be created with {} task slots",
                taskManagerParameters.containeredParameters().numSlots());
        LOG.info(
                "TaskManagers will be started with container size {} MB, JVM heap size {} MB, "
                        + "JVM direct memory limit {} MB, {} cpus",
                taskManagerParameters.containeredParameters().taskManagerTotalMemoryMB(),
                taskManagerParameters.containeredParameters().taskManagerHeapSizeMB(),
                taskManagerParameters.containeredParameters().taskManagerDirectMemoryLimitMB(),
                taskManagerParameters.cpus());

        // JM endpoint, which should be explicitly configured based on acquired net resources
        final int listeningPort = config.getInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY,
                ConfigConstants.DEFAULT_JOB_MANAGER_IPC_PORT);
        checkState(listeningPort >= 0 && listeningPort <= 65535, "Config parameter \""
                + ConfigConstants.JOB_MANAGER_IPC_PORT_KEY + "\" is invalid, it must be between 0 and 65535");

        // ----------------- (2) start the actor system -------------------

        // try to start the actor system, JobManager and JobManager actor system
        // using the configured address and ports
        actorSystem = BootstrapTools.startActorSystem(config, appMasterHostname, listeningPort, LOG);

        Address address = AkkaUtils.getAddress(actorSystem);
        final String akkaHostname = address.host().get();
        final int akkaPort = (Integer) address.port().get();

        LOG.info("Actor system bound to hostname {}.", akkaHostname);

        // try to start the artifact server
        LOG.debug("Starting Artifact Server");
        final int artifactServerPort = config.getInteger(ConfigConstants.MESOS_ARTIFACT_SERVER_PORT_KEY,
                ConfigConstants.DEFAULT_MESOS_ARTIFACT_SERVER_PORT);
        final String artifactServerPrefix = UUID.randomUUID().toString();
        artifactServer = new MesosArtifactServer(artifactServerPrefix, akkaHostname, artifactServerPort,
                config);

        // ----------------- (3) Generate the configuration for the TaskManagers -------------------

        // generate a container spec which conveys the artifacts/vars needed to launch a TM
        ContainerSpecification taskManagerContainerSpec = new ContainerSpecification();

        // propagate the AM dynamic configuration to the TM
        taskManagerContainerSpec.getDynamicConfiguration().addAll(dynamicProperties);

        // propagate newly-generated configuration elements
        final Configuration taskManagerConfig = BootstrapTools.generateTaskManagerConfiguration(
                new Configuration(), akkaHostname, akkaPort,
                taskManagerParameters.containeredParameters().numSlots(), TASKMANAGER_REGISTRATION_TIMEOUT);
        taskManagerContainerSpec.getDynamicConfiguration().addAll(taskManagerConfig);

        // apply the overlays
        applyOverlays(config, taskManagerContainerSpec);

        // configure the artifact server to serve the specified artifacts
        configureArtifactServer(artifactServer, taskManagerContainerSpec);

        // ----------------- (4) start the actors -------------------

        // 1) JobManager & Archive (in non-HA case, the leader service takes this)
        // 2) Web Monitor (we need its port to register)
        // 3) Resource Master for Mesos
        // 4) Process reapers for the JobManager and Resource Master

        // 1: the JobManager
        LOG.debug("Starting JobManager actor");

        // we start the JobManager with its standard name
        ActorRef jobManager = JobManager.startJobManagerActors(config, actorSystem, futureExecutor, ioExecutor,
                new scala.Some<>(JobManager.JOB_MANAGER_NAME()), scala.Option.<String>empty(),
                getJobManagerClass(), getArchivistClass())._1();

        // 2: the web monitor
        LOG.debug("Starting Web Frontend");

        webMonitor = BootstrapTools.startWebMonitorIfConfigured(config, actorSystem, jobManager, LOG);
        if (webMonitor != null) {
            final URL webMonitorURL = new URL("http", appMasterHostname, webMonitor.getServerPort(), "/");
            mesosConfig.frameworkInfo().setWebuiUrl(webMonitorURL.toExternalForm());
        }

        // 3: Flink's Mesos ResourceManager
        LOG.debug("Starting Mesos Flink Resource Manager");

        // create the worker store to persist task information across restarts
        MesosWorkerStore workerStore = mesosServices.createMesosWorkerStore(config, ioExecutor);

        // we need the leader retrieval service here to be informed of new
        // leader session IDs, even though there can be only one leader ever
        LeaderRetrievalService leaderRetriever = LeaderRetrievalUtils.createLeaderRetrievalService(config,
                jobManager);

        Props resourceMasterProps = MesosFlinkResourceManager.createActorProps(getResourceManagerClass(),
                config, mesosConfig, workerStore, leaderRetriever, taskManagerParameters,
                taskManagerContainerSpec, artifactServer, LOG);

        ActorRef resourceMaster = actorSystem.actorOf(resourceMasterProps, "Mesos_Resource_Master");

        // 4: Process reapers
        // The process reapers ensure that upon unexpected actor death, the process exits
        // and does not stay lingering around unresponsive

        LOG.debug("Starting process reapers for JobManager");

        actorSystem.actorOf(Props.create(ProcessReaper.class, resourceMaster, LOG, ACTOR_DIED_EXIT_CODE),
                "Mesos_Resource_Master_Process_Reaper");

        actorSystem.actorOf(Props.create(ProcessReaper.class, jobManager, LOG, ACTOR_DIED_EXIT_CODE),
                "JobManager_Process_Reaper");
    } catch (Throwable t) {
        // make sure that whatever happened ends up in the log
        LOG.error("Mesos JobManager initialization failed", t);

        if (webMonitor != null) {
            try {
                webMonitor.stop();
            } catch (Throwable ignored) {
                LOG.warn("Failed to stop the web frontend", ignored);
            }
        }

        if (artifactServer != null) {
            try {
                artifactServer.stop();
            } catch (Throwable ignored) {
                LOG.error("Failed to stop the artifact server", ignored);
            }
        }

        if (actorSystem != null) {
            try {
                actorSystem.shutdown();
            } catch (Throwable tt) {
                LOG.error("Error shutting down actor system", tt);
            }
        }

        if (futureExecutor != null) {
            try {
                futureExecutor.shutdownNow();
            } catch (Throwable tt) {
                LOG.error("Error shutting down future executor", tt);
            }
        }

        if (ioExecutor != null) {
            try {
                ioExecutor.shutdownNow();
            } catch (Throwable tt) {
                LOG.error("Error shutting down io executor", tt);
            }
        }

        if (mesosServices != null) {
            try {
                mesosServices.close(false);
            } catch (Throwable tt) {
                LOG.error("Error closing the mesos services.", tt);
            }
        }

        return INIT_ERROR_EXIT_CODE;
    }

    // everything started, we can wait until all is done or the process is killed
    LOG.info("Mesos JobManager started");

    // wait until everything is done
    actorSystem.awaitTermination();

    // if we get here, everything worked out all right, and we even exited smoothly
    if (webMonitor != null) {
        try {
            webMonitor.stop();
        } catch (Throwable t) {
            LOG.error("Failed to stop the web frontend", t);
        }
    }

    try {
        artifactServer.stop();
    } catch (Throwable t) {
        LOG.error("Failed to stop the artifact server", t);
    }

    org.apache.flink.runtime.concurrent.Executors.gracefulShutdown(AkkaUtils.getTimeout(config).toMillis(),
            TimeUnit.MILLISECONDS, futureExecutor, ioExecutor);

    try {
        mesosServices.close(true);
    } catch (Throwable t) {
        LOG.error("Failed to clean up and close MesosServices.", t);
    }

    return 0;
}

From source file:org.scale7.cassandra.pelops.pool.CommonsBackedPool.java

private void configureScheduledTasks() {
    if (policy.getTimeBetweenScheduledMaintenanceTaskRunsMillis() > 0) {
        if (policy.isRunMaintenanceTaskDuringInit()) {
            logger.info("Running maintenance tasks during initialization...");
            runMaintenanceTasks();
        }

        if (Policy.MIN_TIME_BETWEEN_SCHEDULED_TASKS >= policy
                .getTimeBetweenScheduledMaintenanceTaskRunsMillis()) {
            logger.warn(
                    "Setting the scheduled tasks to run less than every {} milliseconds is not a good idea...",
                    Policy.MIN_TIME_BETWEEN_SCHEDULED_TASKS);
        }

        logger.info("Configuring scheduled tasks to run every {} milliseconds",
                policy.getTimeBetweenScheduledMaintenanceTaskRunsMillis());
        executorService = Executors.newScheduledThreadPool(1, new ThreadFactory() {
            @Override
            public Thread newThread(Runnable runnable) {
                Thread thread = new Thread(runnable, "pelops-pool-worker-" + getKeyspace());
                thread.setDaemon(true); // don't make the JVM wait for this thread to exit
                thread.setPriority(Thread.MIN_PRIORITY + 1); // try not to disrupt other threads
                return thread;
            }
        });

        executorService.scheduleWithFixedDelay(new Runnable() {
            @Override
            public void run() {
                logger.debug("Background thread running maintenance tasks");
                try {
                    runMaintenanceTasks();
                } catch (Exception e) {
                    logger.warn("An exception was thrown while running the maintenance tasks", e);
                }
            }
        }, policy.getTimeBetweenScheduledMaintenanceTaskRunsMillis(),
                policy.getTimeBetweenScheduledMaintenanceTaskRunsMillis(), TimeUnit.MILLISECONDS);
    } else {
        logger.warn("Disabling maintenance tasks; dynamic node discovery, node suspension, idle connection "
                + "termination and some running statistics will not be available to this pool.");
    }
}

From source file:org.apache.nifi.minifi.bootstrap.configuration.ingestors.FileChangeIngestor.java

@Override
public void start() {
    executorService = Executors.newScheduledThreadPool(1, new ThreadFactory() {
        @Override
        public Thread newThread(final Runnable r) {
            final Thread t = Executors.defaultThreadFactory().newThread(r);
            t.setName("File Change Notifier Thread");
            t.setDaemon(true);
            return t;
        }
    });
    this.executorService.scheduleWithFixedDelay(this, 0, pollingSeconds, DEFAULT_POLLING_PERIOD_UNIT);
}

From source file:org.lizardirc.beancounter.Beancounter.java

private ScheduledExecutorService constructScheduledExecutorService() {
    BasicThreadFactory factory = new BasicThreadFactory.Builder()
            .namingPattern("scheduledExecutorPool-thread%d").daemon(true).build();
    return Executors.newScheduledThreadPool(5, factory); // This seems like it should be enough for the foreseeable future
}

From source file:org.apache.nifi.cluster.coordination.http.replication.ThreadPoolRequestReplicator.java

/**
 * Creates an instance.
 *
 * @param corePoolSize core size of the thread pool
 * @param maxPoolSize the max number of threads in the thread pool
 * @param maxConcurrentRequests maximum number of concurrent requests
 * @param client a client for making requests
 * @param clusterCoordinator the cluster coordinator to use for interacting with node statuses
 * @param connectionTimeout the connection timeout specified in milliseconds
 * @param readTimeout the read timeout specified in milliseconds
 * @param callback a callback that will be called whenever all of the responses have been gathered for a request. May be null.
 * @param eventReporter an EventReporter that can be used to notify users of interesting events. May be null.
 * @param nifiProperties properties
 */
public ThreadPoolRequestReplicator(final int corePoolSize, final int maxPoolSize,
        final int maxConcurrentRequests, final Client client, final ClusterCoordinator clusterCoordinator,
        final String connectionTimeout, final String readTimeout, final RequestCompletionCallback callback,
        final EventReporter eventReporter, final NiFiProperties nifiProperties) {
    if (corePoolSize <= 0) {
        throw new IllegalArgumentException("The Core Pool Size must be greater than zero.");
    } else if (maxPoolSize < corePoolSize) {
        throw new IllegalArgumentException("Max Pool Size must be >= Core Pool Size.");
    } else if (client == null) {
        throw new IllegalArgumentException("Client may not be null.");
    }

    this.client = client;
    this.clusterCoordinator = clusterCoordinator;
    this.connectionTimeoutMs = (int) FormatUtils.getTimeDuration(connectionTimeout, TimeUnit.MILLISECONDS);
    this.readTimeoutMs = (int) FormatUtils.getTimeDuration(readTimeout, TimeUnit.MILLISECONDS);
    this.maxConcurrentRequests = maxConcurrentRequests;
    this.responseMapper = new StandardHttpResponseMapper(nifiProperties);
    this.eventReporter = eventReporter;
    this.callback = callback;
    this.nifiProperties = nifiProperties;

    client.property(ClientProperties.CONNECT_TIMEOUT, connectionTimeoutMs);
    client.property(ClientProperties.READ_TIMEOUT, readTimeoutMs);
    client.property(ClientProperties.FOLLOW_REDIRECTS, Boolean.TRUE);

    final AtomicInteger threadId = new AtomicInteger(0);
    final ThreadFactory threadFactory = r -> {
        final Thread t = Executors.defaultThreadFactory().newThread(r);
        t.setDaemon(true);
        t.setName("Replicate Request Thread-" + threadId.incrementAndGet());
        return t;
    };

    executorService = new ThreadPoolExecutor(corePoolSize, maxPoolSize, 5, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(), threadFactory);

    maintenanceExecutor = Executors.newScheduledThreadPool(1, new ThreadFactory() {
        @Override
        public Thread newThread(final Runnable r) {
            final Thread t = Executors.defaultThreadFactory().newThread(r);
            t.setDaemon(true);
            t.setName(ThreadPoolRequestReplicator.class.getSimpleName() + " Maintenance Thread");
            return t;
        }
    });

    maintenanceExecutor.scheduleWithFixedDelay(() -> purgeExpiredRequests(), 1, 1, TimeUnit.SECONDS);
}

From source file:org.apache.nifi.remote.util.SiteToSiteRestApiClient.java

public SiteToSiteRestApiClient(final SSLContext sslContext, final HttpProxy proxy,
        final EventReporter eventReporter) {
    this.sslContext = sslContext;
    this.proxy = proxy;
    this.eventReporter = eventReporter;

    ttlExtendTaskExecutor = Executors.newScheduledThreadPool(1, new ThreadFactory() {
        private final ThreadFactory defaultFactory = Executors.defaultThreadFactory();

        @Override
        public Thread newThread(final Runnable r) {
            final Thread thread = defaultFactory.newThread(r);
            thread.setName(Thread.currentThread().getName() + " TTLExtend");
            thread.setDaemon(true);
            return thread;
        }
    });
}

From source file:de.fu_berlin.inf.dpp.net.internal.StreamServiceManager.java

protected void startThreads() {
    sender = new PacketSender();
    Utils.runSafeAsync("StreamServiceManagers-senderThread", log, sender);
    receiver = new PacketReceiver();
    Utils.runSafeAsync("StreamServiceManagers-receiverThread", log, receiver);
    stopSessionExecutor = Executors.newScheduledThreadPool(5, new NamedThreadFactory("StreamSessionStopper-"));

    sessionDispatcher = Executors.newSingleThreadExecutor(new NamedThreadFactory("StreamSessionDispatcher-"));

    negotiatesToUser = Executors
            .newSingleThreadExecutor(new NamedThreadFactory("StreamSessionNegotiationUser-"));

    negotiations = Executors.newFixedThreadPool(5, new NamedThreadFactory("StreamSessionNegotiation-"));
}