Example usage for the java.util.concurrent.ScheduledThreadPoolExecutor(int) constructor

Introduction

This page collects usage examples for the java.util.concurrent.ScheduledThreadPoolExecutor(int) constructor.

Prototype

public ScheduledThreadPoolExecutor(int corePoolSize) 

Document

Creates a new ScheduledThreadPoolExecutor with the given core pool size.
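Before the real-world usages below, here is a minimal, self-contained sketch (class name and timings are illustrative, not taken from any of the sources) showing the constructor in use:

import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class StpeDemo {
    public static void main(String[] args) throws InterruptedException {
        // Core pool of 2 threads; the executor is sized by corePoolSize alone
        // and queues work on an unbounded internal delay queue.
        ScheduledThreadPoolExecutor stpe = new ScheduledThreadPoolExecutor(2);

        // Run a task every 500 ms, starting immediately.
        stpe.scheduleAtFixedRate(() -> System.out.println("tick"), 0, 500, TimeUnit.MILLISECONDS);

        Thread.sleep(2000);
        stpe.shutdown(); // periodic tasks are cancelled on shutdown by default
        stpe.awaitTermination(1, TimeUnit.SECONDS);
    }
}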

Usage

From source file:com.n0n3m4.q3e.Q3ECallbackObj.java

public void init(int size) {
    if (mAudioTrack != null)
        return;
    if ((Q3EUtils.q3ei.isQ3) || (Q3EUtils.q3ei.isRTCW) || (Q3EUtils.q3ei.isQ1) || (Q3EUtils.q3ei.isQ2))
        size /= 8;

    mAudioData = new byte[size];
    int sampleFreq = 44100;

    /* (OUYA ASCII-art banner from the original source omitted;
       the Ouya console is special-cased in the buffer sizing below) */

    int bufferSize = Math.max((Q3EUtils.isOuya) ? 0 : 3 * size, AudioTrack.getMinBufferSize(sampleFreq,
            AudioFormat.CHANNEL_CONFIGURATION_STEREO, AudioFormat.ENCODING_PCM_16BIT));
    mAudioTrack = new Q3EAudioTrack(AudioManager.STREAM_MUSIC, sampleFreq,
            AudioFormat.CHANNEL_CONFIGURATION_STEREO, AudioFormat.ENCODING_PCM_16BIT, bufferSize,
            AudioTrack.MODE_STREAM);
    mAudioTrack.play();
    // Buffer drain time in ns: size bytes / (2 bytes per sample * 2 channels * sampleFreq).
    long sleeptime = (size * 1000000000L) / (2 * 2 * sampleFreq);
    ScheduledThreadPoolExecutor stpe = new ScheduledThreadPoolExecutor(5);
    stpe.scheduleAtFixedRate(new Runnable() {
        @Override
        public void run() {
            if (reqThreadrunning) {
                Q3EJNI.requestAudioData();
            }
        }
    }, 0, sleeptime, TimeUnit.NANOSECONDS);
}
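Note that the executor above is never shut down, and a core pool of five threads backs a single periodic task; one thread would suffice, and calling stpe.shutdown() on teardown would release the threads.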

From source file:com.fusesource.forge.jmstest.executor.BenchmarkJMSProducerWrapper.java

private void runProducers(long rate, long duration) {

    BigDecimal bd = new BigDecimal(1000000).divide(new BigDecimal(rate), BigDecimal.ROUND_HALF_DOWN);
    long delayInMicroSeconds;
    try {
        delayInMicroSeconds = bd.longValueExact();
    } catch (ArithmeticException e) {
        delayInMicroSeconds = bd.longValue();
        log().warn("Publish rate cannot be expressed as a precise microsecond value, rounding to nearest value "
                + "[actualDelay: " + delayInMicroSeconds + "]");
    }

    int producersNeeded = (int) (rate / getPartConfig().getMaxConsumerRatePerThread());
    if (producersNeeded == 0) {
        producersNeeded++;
    }

    log.debug("Running " + producersNeeded + " producers for " + duration + "s");
    producers = new ArrayList<BenchmarkProducer>(producersNeeded);
    sendingDelay = delayInMicroSeconds * producersNeeded;
    executor = new ScheduledThreadPoolExecutor(producersNeeded);

    for (int i = 0; i < producersNeeded; i++) {
        try {
            BenchmarkProducer producer = new BenchmarkProducer(this);
            producer.start();
            producer.setMessageCounter(getProbe());
            producers.add(producer);
        } catch (Exception e) {
            throw new BenchmarkConfigurationException("Unable to create BenchmarkProducer instance", e);
        }
    }
    for (BenchmarkProducer producer : producers) {
        // TODO should really hold onto these and monitor for failures until the
        // executor is shutdown
        executor.scheduleAtFixedRate(new MessageSender(producer), 0, sendingDelay, sendingDelayUnit);
    }

    final CountDownLatch latch = new CountDownLatch(1);

    new ScheduledThreadPoolExecutor(1).schedule(new Runnable() {
        public void run() {
            try {
                log.debug("Shutting down producers.");
                executor.shutdown();
                for (BenchmarkProducer producer : producers) {
                    try {
                        producer.release();
                    } catch (Exception e) {
                        log().error("Error releasing producer.");
                    }
                }
                latch.countDown();
            } catch (Exception e) {
            }
        }
    }, duration, TimeUnit.SECONDS);

    try {
        latch.await();
    } catch (InterruptedException ie) {
        log().warn("Producer run has been interrupted ...");
    }
}
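One detail worth flagging: the one-shot shutdown timer is created as new ScheduledThreadPoolExecutor(1) without keeping a reference, so that executor is itself never shut down and its core thread lingers after the task fires. Holding a reference and shutting it down after latch.await() would avoid the leak.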

From source file:com.netflix.curator.framework.recipes.locks.TestReaper.java

@Test
public void testSparseUseNoReap() throws Exception {
    final int THRESHOLD = 3000;

    Timing timing = new Timing();
    Reaper reaper = null;
    Future<Void> watcher = null;
    CuratorFramework client = makeClient(timing, null);
    try {
        client.start();
        client.create().creatingParentsIfNeeded().forPath("/one/two/three");

        Assert.assertNotNull(client.checkExists().forPath("/one/two/three"));

        final Queue<Reaper.PathHolder> holders = new ConcurrentLinkedQueue<Reaper.PathHolder>();
        final ExecutorService pool = Executors.newCachedThreadPool();
        ScheduledExecutorService service = new ScheduledThreadPoolExecutor(1) {
            @Override
            public ScheduledFuture<?> schedule(Runnable command, long delay, TimeUnit unit) {
                final Reaper.PathHolder pathHolder = (Reaper.PathHolder) command;
                holders.add(pathHolder);
                final ScheduledFuture<?> f = super.schedule(command, delay, unit);
                pool.submit(new Callable<Void>() {
                    @Override
                    public Void call() throws Exception {
                        f.get();
                        holders.remove(pathHolder);
                        return null;
                    }
                });
                return f;
            }
        };

        reaper = new Reaper(client, service, THRESHOLD);
        reaper.start();
        reaper.addPath("/one/two/three");

        long start = System.currentTimeMillis();
        boolean emptyCountIsCorrect = false;
        while (((System.currentTimeMillis() - start) < timing.forWaiting().milliseconds())
                && !emptyCountIsCorrect) // need to loop as the Holder can go in/out of the Reaper's DelayQueue
        {
            for (Reaper.PathHolder holder : holders) {
                if (holder.path.endsWith("/one/two/three")) {
                    emptyCountIsCorrect = (holder.emptyCount > 0);
                    break;
                }
            }
            Thread.sleep(1);
        }
        Assert.assertTrue(emptyCountIsCorrect);

        client.create().forPath("/one/two/three/foo");

        Thread.sleep(2 * (THRESHOLD / Reaper.EMPTY_COUNT_THRESHOLD));
        Assert.assertNotNull(client.checkExists().forPath("/one/two/three"));
        client.delete().forPath("/one/two/three/foo");

        Thread.sleep(THRESHOLD);
        timing.sleepABit();

        Assert.assertNull(client.checkExists().forPath("/one/two/three"));
    } finally {
        if (watcher != null) {
            watcher.cancel(true);
        }
        IOUtils.closeQuietly(reaper);
        IOUtils.closeQuietly(client);
    }
}
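The interesting move here is subclassing ScheduledThreadPoolExecutor and overriding schedule(): every command the Reaper submits is recorded in the holders queue and removed once its ScheduledFuture completes, letting the test observe the reaper's in-flight PathHolder entries without touching the Reaper itself.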

From source file:org.mule.config.pool.DefaultThreadPoolFactory.java

protected ScheduledThreadPoolExecutor internalCreateScheduledPool(ThreadingProfile tp) {
    ScheduledThreadPoolExecutor scheduledThreadPoolExecutor = new ScheduledThreadPoolExecutor(
            Math.min(tp.getMaxThreadsIdle(), tp.getMaxThreadsActive()));
    scheduledThreadPoolExecutor.setContinueExistingPeriodicTasksAfterShutdownPolicy(false);
    scheduledThreadPoolExecutor.setExecuteExistingDelayedTasksAfterShutdownPolicy(true);
    scheduledThreadPoolExecutor.setKeepAliveTime(tp.getThreadTTL(), TimeUnit.MILLISECONDS);
    scheduledThreadPoolExecutor.setCorePoolSize(tp.getMaxThreadsIdle());
    scheduledThreadPoolExecutor.setMaximumPoolSize(tp.getMaxThreadsActive());
    return scheduledThreadPoolExecutor;
}
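A caveat on this configuration: ScheduledThreadPoolExecutor runs as a fixed-size pool of corePoolSize threads over an unbounded internal queue, so the setMaximumPoolSize() call has no practical effect, and setKeepAliveTime() only matters if core threads are allowed to time out via allowCoreThreadTimeOut(true).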

From source file:com.murdock.tools.invocationstats.InvocationStatsServiceImpl.java

@Override
public void crontabExecuteAllStatsMethods(InvocationStatsExecutor executor, int intervalSeconds) {
    if (executor != null && intervalSeconds > 0) {
        lock.lock(); // acquire before try, so the finally block never unlocks a lock that was not obtained
        try {
            if (!start) {
                threadPool = new ScheduledThreadPoolExecutor(1);
                ExecuteJob job = new ExecuteJob(executor);
                threadPool.scheduleAtFixedRate(job, intervalSeconds, intervalSeconds, TimeUnit.SECONDS);
                start = true;
            }
        } finally {
            lock.unlock();
        }
    }
}
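The start flag, checked and set while holding the lock, guarantees the single-threaded scheduler is created and the job registered at most once, no matter how many times the method is called.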

From source file:io.fabric8.insight.metrics.service.MetricsCollector.java

@Activate
private void activate(BundleContext bundleContext) throws Exception {
    this.bundleContext = bundleContext;
    this.executor = new ScheduledThreadPoolExecutor(threadPoolSize);
    this.executor.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
    this.executor.setContinueExistingPeriodicTasksAfterShutdownPolicy(false);

    this.executor.scheduleWithFixedDelay(new Runnable() {
        @Override
        public void run() {
            process();
        }
    }, 1, defaultDelay, TimeUnit.SECONDS);

    JMXUtils.registerMBean(this, mbeanServer, new ObjectName("io.fabric8.insight:type=MetricsCollector"));
}
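scheduleWithFixedDelay() measures defaultDelay from the end of one process() run to the start of the next, so a slow collection cannot cause runs to pile up; and with both after-shutdown policies set to false, pending work is simply dropped when the executor stops.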

From source file:org.rhq.core.pc.drift.DriftManager.java

@Override
public void initialize() {
    long initStartTime = System.currentTimeMillis();
    boolean success = changeSetsDir.mkdir();
    if (!success) {
        log.warn("Could not create change sets directory " + changeSetsDir);
        initialized = false;
        return;
    }
    changeSetMgr = new ChangeSetManagerImpl(changeSetsDir);

    DriftDetector driftDetector = new DriftDetector();
    driftDetector.setScheduleQueue(schedulesQueue);
    driftDetector.setChangeSetManager(changeSetMgr);
    driftDetector.setDriftClient(this);

    InventoryManager inventoryMgr = PluginContainer.getInstance().getInventoryManager();
    long startTime = System.currentTimeMillis();
    initSchedules(inventoryMgr.getPlatform(), inventoryMgr);
    long endTime = System.currentTimeMillis();

    if (log.isInfoEnabled()) {
        log.info("Finished initializing drift detection schedules in " + (endTime - startTime) + " ms");
    }

    scanForContentToResend();
    purgeDeletedDriftDefDirs();

    driftThreadPool = new ScheduledThreadPoolExecutor(5);

    long initialDelay = pluginContainerConfiguration.getDriftDetectionInitialDelay();
    long period = pluginContainerConfiguration.getDriftDetectionPeriod();
    if (period > 0) {
        // note that drift detection is globally disabled if the detection period is 0 or less
        driftThreadPool.scheduleAtFixedRate(driftDetector, initialDelay, period, TimeUnit.SECONDS);
    } else {
        log.info("Drift detection has been globally disabled as per plugin container configuration");
    }

    initialized = true;
    long initEndTime = System.currentTimeMillis();
    if (log.isInfoEnabled()) {
        log.info("Finished initialization in " + (initEndTime - initStartTime) + " ms");
    }
}
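scheduleAtFixedRate() never runs a periodic task concurrently with itself: if a drift-detection pass exceeds the period, the next execution starts late rather than in parallel, so a single DriftDetector instance is safe to schedule this way.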

From source file:eu.eidas.node.auth.metadata.NODEFileMetadataProcessor.java

private void initFileMonitor() {
    if (fileService != null && fileService.existsFile(".")) {
        try {
            monitor = new FileAlterationMonitor(MONITOR_INTERVAL);
            observer = new FileAlterationObserver(fileService.getRepositoryDir());
            xmlObserver = new XMLObserver();
            observer.addListener(xmlObserver);
            monitor.addObserver(observer);
            monitor.start();

            //periodically refresh static metadata
            stpe = new ScheduledThreadPoolExecutor(1);
            refreshCommand = new RefreshStaticMetadata(xmlObserver, fileService);
            //TODO externalize the interval between refreshes in the property file
            stpe.scheduleAtFixedRate(refreshCommand, 1, 24, TimeUnit.HOURS);

        } catch (Exception e) {
            LOG.error("fail to stop file monitor {}", e);
        }
    }
}

From source file:com.streamsets.pipeline.kafka.impl.BaseKafkaConsumer09.java

public BaseKafkaConsumer09(String topic, Source.Context context, int batchSize) {
    this.topic = topic;
    this.topicPartitionToOffsetMetadataMap = new HashMap<>();
    this.recordQueue = new ArrayBlockingQueue<>(batchSize);
    this.executorService = new ScheduledThreadPoolExecutor(1);
    this.pollCommitMutex = new Object();
    this.rebalanceInProgress = new AtomicBoolean(false);
    this.needToCallPoll = new AtomicBoolean(false);
    this.context = context;
    this.rebalanceHistogram = context.createHistogram("Rebalance Time");
    this.gaugeMap = context.createGauge("Internal state").getValue();
}
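Here the single-threaded scheduled executor is only stored in a field; with a core pool of one, any tasks later scheduled on it run strictly one at a time, which (presumably, given the pollCommitMutex alongside it) is how this consumer keeps its periodic Kafka work serialized.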

From source file:org.apache.hadoop.hdfs.BlockStorageLocationUtil.java

/**
 * Queries datanodes for the blocks specified in <code>datanodeBlocks</code>,
 * making one RPC to each datanode. These RPCs are made in parallel using a
 * threadpool.
 *
 * @param datanodeBlocks
 *     Map of datanodes to the blocks present on the DN
 * @return metadatas Map of datanodes to block metadata of the DN
 * @throws InvalidBlockTokenException
 *     if client does not have read access on a requested block
 */
static Map<DatanodeInfo, HdfsBlocksMetadata> queryDatanodesForHdfsBlocksMetadata(Configuration conf,
        Map<DatanodeInfo, List<LocatedBlock>> datanodeBlocks, int poolsize, int timeoutMs,
        boolean connectToDnViaHostname, Tracer tracer, SpanId parentSpanId) throws InvalidBlockTokenException {

    List<VolumeBlockLocationCallable> callables = createVolumeBlockLocationCallables(conf, datanodeBlocks,
            timeoutMs, connectToDnViaHostname, tracer, parentSpanId);

    // Use a thread pool to execute the Callables in parallel
    List<Future<HdfsBlocksMetadata>> futures = new ArrayList<>();
    ExecutorService executor = new ScheduledThreadPoolExecutor(poolsize);
    try {
        futures = executor.invokeAll(callables, timeoutMs, TimeUnit.MILLISECONDS);
    } catch (InterruptedException e) {
        // Swallow the exception here, because we can return partial results
    }
    executor.shutdown();

    Map<DatanodeInfo, HdfsBlocksMetadata> metadatas = Maps.newHashMapWithExpectedSize(datanodeBlocks.size());
    // Fill in metadatas with results from DN RPCs, where possible
    for (int i = 0; i < futures.size(); i++) {
        VolumeBlockLocationCallable callable = callables.get(i);
        DatanodeInfo datanode = callable.getDatanodeInfo();
        Future<HdfsBlocksMetadata> future = futures.get(i);
        try {
            HdfsBlocksMetadata metadata = future.get();
            metadatas.put(callable.getDatanodeInfo(), metadata);
        } catch (CancellationException e) {
            LOG.info(
                    "Cancelled while waiting for datanode " + datanode.getIpcAddr(false) + ": " + e.toString());
        } catch (ExecutionException e) {
            Throwable t = e.getCause();
            if (t instanceof InvalidBlockTokenException) {
                LOG.warn("Invalid access token when trying to retrieve " + "information from datanode "
                        + datanode.getIpcAddr(false));
                throw (InvalidBlockTokenException) t;
            } else if (t instanceof UnsupportedOperationException) {
                LOG.info("Datanode " + datanode.getIpcAddr(false) + " does not support"
                        + " required #getHdfsBlocksMetadata() API");
                throw (UnsupportedOperationException) t;
            } else {
                LOG.info(
                        "Failed to query block locations on datanode " + datanode.getIpcAddr(false) + ": " + t);
            }
            if (LOG.isDebugEnabled()) {
                LOG.debug("Could not fetch information from datanode", t);
            }
        } catch (InterruptedException e) {
            // Shouldn't happen, because invokeAll waits for all Futures to be ready
            LOG.info("Interrupted while fetching HdfsBlocksMetadata");
        }
    }

    return metadatas;
}
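Note that the ScheduledThreadPoolExecutor is used here purely as an ExecutorService: nothing is scheduled, so a plain fixed-size pool would serve equally well. The timed invokeAll() cancels any task still incomplete when timeoutMs expires, which is why partial results are possible and acceptable here.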