Example usage for java.util.concurrent Executors newScheduledThreadPool

Introduction

This page collects example usages of java.util.concurrent.Executors.newScheduledThreadPool.

Prototype

public static ScheduledExecutorService newScheduledThreadPool(int corePoolSize, ThreadFactory threadFactory) 

Document

Creates a thread pool that can schedule commands to run after a given delay, or to execute periodically.
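
As a quick reference, here is a minimal, self-contained sketch of the two-argument overload in isolation; the pool size, thread name prefix, schedule, and task are illustrative assumptions rather than values taken from the examples below. corePoolSize is the number of threads kept in the pool, and threadFactory is used whenever the executor creates a new thread.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class ScheduledPoolExample {
    public static void main(String[] args) throws InterruptedException {
        // Custom ThreadFactory: daemon threads with a recognizable name.
        ThreadFactory factory = new ThreadFactory() {
            private final AtomicInteger count = new AtomicInteger(0);

            @Override
            public Thread newThread(Runnable r) {
                Thread t = new Thread(r, "example-scheduler-" + count.incrementAndGet());
                t.setDaemon(true);
                return t;
            }
        };

        // One core thread is enough for a single periodic task.
        ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1, factory);

        // Run a task every 500 ms, starting immediately.
        scheduler.scheduleAtFixedRate(
                () -> System.out.println("tick on " + Thread.currentThread().getName()),
                0, 500, TimeUnit.MILLISECONDS);

        // Let the task fire a few times, then shut down cleanly.
        Thread.sleep(2000);
        scheduler.shutdown();
        scheduler.awaitTermination(5, TimeUnit.SECONDS);
    }
}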

Usage

From source file: com.datatorrent.contrib.kafka.SimpleKafkaConsumer.java

@Override
public void start() {
    monitorException = new AtomicReference<Throwable>(null);
    monitorExceptionCount = new AtomicInteger(0);
    super.start();

    // thread to consume the kafka data
    kafkaConsumerExecutor = Executors.newCachedThreadPool(
            new ThreadFactoryBuilder().setNameFormat("kafka-consumer-" + topic + "-%d").build());

    if (metadataRefreshInterval <= 0 || CollectionUtils.isEmpty(kps)) {
        return;
    }

    // background thread to monitor the kafka metadata change
    metadataRefreshExecutor = Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder()
            .setNameFormat("kafka-consumer-monitor-" + topic + "-%d").setDaemon(true).build());

    // start one monitor thread to monitor the leader broker change and trigger some action
    metadataRefreshExecutor.scheduleAtFixedRate(new MetaDataMonitorTask(this), 0, metadataRefreshInterval,
            TimeUnit.MILLISECONDS);
}

From source file: org.apache.tinkerpop.gremlin.groovy.engine.GremlinExecutorTest.java

@Test
public void shouldNotExhaustThreads() throws Exception {
    final ScheduledExecutorService executorService = Executors.newScheduledThreadPool(2, testingThreadFactory);
    final GremlinExecutor gremlinExecutor = GremlinExecutor.build().executorService(executorService)
            .scheduledExecutorService(executorService).create();

    final AtomicInteger count = new AtomicInteger(0);
    assertTrue(IntStream.range(0, 1000).mapToObj(i -> gremlinExecutor.eval("1+1")).allMatch(f -> {
        try {
            return (Integer) f.get() == 2;
        } catch (Exception ex) {
            throw new RuntimeException(ex);
        } finally {
            count.incrementAndGet();
        }
    }));

    assertEquals(1000, count.intValue());

    executorService.shutdown();
    executorService.awaitTermination(30000, TimeUnit.MILLISECONDS);
}

From source file: com.metamx.druid.realtime.plumber.RealtimePlumberSchool.java

@Override
public Plumber findPlumber(final Schema schema, final FireDepartmentMetrics metrics) {
    verifyState();

    final RejectionPolicy rejectionPolicy = rejectionPolicyFactory.create(windowPeriod);
    log.info("Creating plumber using rejectionPolicy[%s]", rejectionPolicy);

    return new Plumber() {
        private volatile boolean stopped = false;
        private volatile ExecutorService persistExecutor = null;
        private volatile ScheduledExecutorService scheduledExecutor = null;

        private final Map<Long, Sink> sinks = Maps.newConcurrentMap();
        private final VersionedIntervalTimeline<String, Sink> sinkTimeline = new VersionedIntervalTimeline<String, Sink>(
                String.CASE_INSENSITIVE_ORDER);

        @Override
        public void startJob() {
            computeBaseDir(schema).mkdirs();
            initializeExecutors();
            bootstrapSinksFromDisk();
            registerServerViewCallback();
            startPersistThread();
        }

        @Override
        public Sink getSink(long timestamp) {
            if (!rejectionPolicy.accept(timestamp)) {
                return null;
            }

            final long truncatedTime = segmentGranularity.truncate(timestamp);

            Sink retVal = sinks.get(truncatedTime);

            if (retVal == null) {
                final Interval sinkInterval = new Interval(new DateTime(truncatedTime),
                        segmentGranularity.increment(new DateTime(truncatedTime)));

                retVal = new Sink(sinkInterval, schema, versioningPolicy.getVersion(sinkInterval));

                try {
                    segmentAnnouncer.announceSegment(retVal.getSegment());
                    sinks.put(truncatedTime, retVal);
                    sinkTimeline.add(retVal.getInterval(), retVal.getVersion(),
                            new SingleElementPartitionChunk<Sink>(retVal));
                } catch (IOException e) {
                    log.makeAlert(e, "Failed to announce new segment[%s]", schema.getDataSource())
                            .addData("interval", retVal.getInterval()).emit();
                }
            }

            return retVal;
        }

        @Override
        public <T> QueryRunner<T> getQueryRunner(final Query<T> query) {
            final QueryRunnerFactory<T, Query<T>> factory = conglomerate.findFactory(query);
            final QueryToolChest<T, Query<T>> toolchest = factory.getToolchest();

            final Function<Query<T>, ServiceMetricEvent.Builder> builderFn = new Function<Query<T>, ServiceMetricEvent.Builder>() {

                @Override
                public ServiceMetricEvent.Builder apply(@Nullable Query<T> input) {
                    return toolchest.makeMetricBuilder(query);
                }
            };

            List<TimelineObjectHolder<String, Sink>> querySinks = Lists.newArrayList();
            for (Interval interval : query.getIntervals()) {
                querySinks.addAll(sinkTimeline.lookup(interval));
            }

            return toolchest.mergeResults(factory.mergeRunners(EXEC, FunctionalIterable.create(querySinks)
                    .transform(new Function<TimelineObjectHolder<String, Sink>, QueryRunner<T>>() {
                        @Override
                        public QueryRunner<T> apply(TimelineObjectHolder<String, Sink> holder) {
                            final Sink theSink = holder.getObject().getChunk(0).getObject();
                            return new SpecificSegmentQueryRunner<T>(new MetricsEmittingQueryRunner<T>(emitter,
                                    builderFn, factory.mergeRunners(EXEC, Iterables.transform(theSink,
                                            new Function<FireHydrant, QueryRunner<T>>() {
                                                @Override
                                                public QueryRunner<T> apply(FireHydrant input) {
                                                    return factory.createRunner(input.getSegment());
                                                }
                                            }))),
                                    new SpecificSegmentSpec(new SegmentDescriptor(holder.getInterval(),
                                            theSink.getSegment().getVersion(),
                                            theSink.getSegment().getShardSpec().getPartitionNum())));
                        }
                    })));
        }

        @Override
        public void persist(final Runnable commitRunnable) {
            final List<Pair<FireHydrant, Interval>> indexesToPersist = Lists.newArrayList();
            for (Sink sink : sinks.values()) {
                if (sink.swappable()) {
                    indexesToPersist.add(Pair.of(sink.swap(), sink.getInterval()));
                }
            }

            log.info("Submitting persist runnable for dataSource[%s]", schema.getDataSource());

            persistExecutor.execute(new ThreadRenamingRunnable(
                    String.format("%s-incremental-persist", schema.getDataSource())) {
                @Override
                public void doRun() {
                    for (Pair<FireHydrant, Interval> pair : indexesToPersist) {
                        metrics.incrementRowOutputCount(persistHydrant(pair.lhs, schema, pair.rhs));
                    }
                    commitRunnable.run();
                }
            });
        }

        // Submits persist-n-merge task for a Sink to the persistExecutor
        private void persistAndMerge(final long truncatedTime, final Sink sink) {
            final String threadName = String.format("%s-%s-persist-n-merge", schema.getDataSource(),
                    new DateTime(truncatedTime));
            persistExecutor.execute(new ThreadRenamingRunnable(threadName) {
                @Override
                public void doRun() {
                    final Interval interval = sink.getInterval();

                    for (FireHydrant hydrant : sink) {
                        if (!hydrant.hasSwapped()) {
                            log.info("Hydrant[%s] hasn't swapped yet, swapping. Sink[%s]", hydrant, sink);
                            final int rowCount = persistHydrant(hydrant, schema, interval);
                            metrics.incrementRowOutputCount(rowCount);
                        }
                    }

                    final File mergedTarget = new File(computePersistDir(schema, interval), "merged");
                    if (mergedTarget.exists()) {
                        log.info("Skipping already-merged sink: %s", sink);
                        return;
                    }

                    File mergedFile = null;
                    try {
                        List<QueryableIndex> indexes = Lists.newArrayList();
                        for (FireHydrant fireHydrant : sink) {
                            Segment segment = fireHydrant.getSegment();
                            final QueryableIndex queryableIndex = segment.asQueryableIndex();
                            log.info("Adding hydrant[%s]", fireHydrant);
                            indexes.add(queryableIndex);
                        }

                        mergedFile = IndexMerger.mergeQueryableIndex(indexes, schema.getAggregators(),
                                mergedTarget);

                        QueryableIndex index = IndexIO.loadIndex(mergedFile);

                        DataSegment segment = dataSegmentPusher.push(mergedFile, sink.getSegment()
                                .withDimensions(Lists.newArrayList(index.getAvailableDimensions())));

                        segmentPublisher.publishSegment(segment);
                    } catch (IOException e) {
                        log.makeAlert(e, "Failed to persist merged index[%s]", schema.getDataSource())
                                .addData("interval", interval).emit();
                    }

                    if (mergedFile != null) {
                        try {
                            if (mergedFile != null) {
                                log.info("Deleting Index File[%s]", mergedFile);
                                FileUtils.deleteDirectory(mergedFile);
                            }
                        } catch (IOException e) {
                            log.warn(e, "Error deleting directory[%s]", mergedFile);
                        }
                    }
                }
            });
        }

        @Override
        public void finishJob() {
            log.info("Shutting down...");

            for (final Map.Entry<Long, Sink> entry : sinks.entrySet()) {
                persistAndMerge(entry.getKey(), entry.getValue());
            }

            while (!sinks.isEmpty()) {
                try {
                    log.info("Cannot shut down yet! Sinks remaining: %s", Joiner.on(", ")
                            .join(Iterables.transform(sinks.values(), new Function<Sink, String>() {
                                @Override
                                public String apply(Sink input) {
                                    return input.getSegment().getIdentifier();
                                }
                            })));

                    synchronized (handoffCondition) {
                        while (!sinks.isEmpty()) {
                            handoffCondition.wait();
                        }
                    }
                } catch (InterruptedException e) {
                    throw Throwables.propagate(e);
                }
            }

            // scheduledExecutor is shutdown here, but persistExecutor is shutdown when the
            // ServerView sends it a new segment callback
            if (scheduledExecutor != null) {
                scheduledExecutor.shutdown();
            }

            stopped = true;
        }

        private void initializeExecutors() {
            if (persistExecutor == null) {
                persistExecutor = Executors.newFixedThreadPool(1,
                        new ThreadFactoryBuilder().setDaemon(true).setNameFormat("plumber_persist_%d").build());
            }
            if (scheduledExecutor == null) {
                scheduledExecutor = Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder()
                        .setDaemon(true).setNameFormat("plumber_scheduled_%d").build());
            }
        }

        private void bootstrapSinksFromDisk() {
            for (File sinkDir : computeBaseDir(schema).listFiles()) {
                Interval sinkInterval = new Interval(sinkDir.getName().replace("_", "/"));

                //final File[] sinkFiles = sinkDir.listFiles();
                // To avoid reading and listing of "merged" dir
                final File[] sinkFiles = sinkDir.listFiles(new FilenameFilter() {
                    @Override
                    public boolean accept(File dir, String fileName) {
                        return !(Ints.tryParse(fileName) == null);
                    }
                });
                Arrays.sort(sinkFiles, new Comparator<File>() {
                    @Override
                    public int compare(File o1, File o2) {
                        try {
                            return Ints.compare(Integer.parseInt(o1.getName()), Integer.parseInt(o2.getName()));
                        } catch (NumberFormatException e) {
                            log.error(e, "Couldn't compare as numbers? [%s][%s]", o1, o2);
                            return o1.compareTo(o2);
                        }
                    }
                });

                try {
                    List<FireHydrant> hydrants = Lists.newArrayList();
                    for (File segmentDir : sinkFiles) {
                        log.info("Loading previously persisted segment at [%s]", segmentDir);

                        // This case was already filtered out when listing sinkFiles above.
                        // It is kept as a double-check so the "merged" dir is never added to the hydrants.
                        // If that filter is guaranteed to be sufficient, this check can be removed.
                        if (Ints.tryParse(segmentDir.getName()) == null) {
                            continue;
                        }

                        hydrants.add(
                                new FireHydrant(new QueryableIndexSegment(null, IndexIO.loadIndex(segmentDir)),
                                        Integer.parseInt(segmentDir.getName())));
                    }

                    Sink currSink = new Sink(sinkInterval, schema, versioningPolicy.getVersion(sinkInterval),
                            hydrants);
                    sinks.put(sinkInterval.getStartMillis(), currSink);
                    sinkTimeline.add(currSink.getInterval(), currSink.getVersion(),
                            new SingleElementPartitionChunk<Sink>(currSink));

                    segmentAnnouncer.announceSegment(currSink.getSegment());
                } catch (IOException e) {
                    log.makeAlert(e, "Problem loading sink[%s] from disk.", schema.getDataSource())
                            .addData("interval", sinkInterval).emit();
                }
            }
        }

        private void registerServerViewCallback() {
            serverView.registerSegmentCallback(persistExecutor, new ServerView.BaseSegmentCallback() {
                @Override
                public ServerView.CallbackAction segmentAdded(DruidServer server, DataSegment segment) {
                    if (stopped) {
                        log.info("Unregistering ServerViewCallback");
                        persistExecutor.shutdown();
                        return ServerView.CallbackAction.UNREGISTER;
                    }

                    if ("realtime".equals(server.getType())) {
                        return ServerView.CallbackAction.CONTINUE;
                    }

                    log.debug("Checking segment[%s] on server[%s]", segment, server);
                    if (schema.getDataSource().equals(segment.getDataSource())) {
                        final Interval interval = segment.getInterval();
                        for (Map.Entry<Long, Sink> entry : sinks.entrySet()) {
                            final Long sinkKey = entry.getKey();
                            if (interval.contains(sinkKey)) {
                                final Sink sink = entry.getValue();
                                log.info("Segment[%s] matches sink[%s] on server[%s]", segment, sink, server);

                                final String segmentVersion = segment.getVersion();
                                final String sinkVersion = sink.getSegment().getVersion();
                                if (segmentVersion.compareTo(sinkVersion) >= 0) {
                                    log.info("Segment version[%s] >= sink version[%s]", segmentVersion,
                                            sinkVersion);
                                    try {
                                        segmentAnnouncer.unannounceSegment(sink.getSegment());
                                        FileUtils
                                                .deleteDirectory(computePersistDir(schema, sink.getInterval()));
                                        log.info("Removing sinkKey %d for segment %s", sinkKey,
                                                sink.getSegment().getIdentifier());
                                        sinks.remove(sinkKey);
                                        sinkTimeline.remove(sink.getInterval(), sink.getVersion(),
                                                new SingleElementPartitionChunk<Sink>(sink));

                                        synchronized (handoffCondition) {
                                            handoffCondition.notifyAll();
                                        }
                                    } catch (IOException e) {
                                        log.makeAlert(e, "Unable to delete old segment for dataSource[%s].",
                                                schema.getDataSource()).addData("interval", sink.getInterval())
                                                .emit();
                                    }
                                }
                            }
                        }
                    }

                    return ServerView.CallbackAction.CONTINUE;
                }
            });
        }

        private void startPersistThread() {
            final long truncatedNow = segmentGranularity.truncate(new DateTime()).getMillis();
            final long windowMillis = windowPeriod.toStandardDuration().getMillis();

            log.info("Expect to run at [%s]", new DateTime().plus(new Duration(System.currentTimeMillis(),
                    segmentGranularity.increment(truncatedNow) + windowMillis)));

            ScheduledExecutors.scheduleAtFixedRate(scheduledExecutor,
                    new Duration(System.currentTimeMillis(),
                            segmentGranularity.increment(truncatedNow) + windowMillis),
                    new Duration(truncatedNow, segmentGranularity.increment(truncatedNow)),
                    new ThreadRenamingCallable<ScheduledExecutors.Signal>(String.format("%s-overseer-%d",
                            schema.getDataSource(), schema.getShardSpec().getPartitionNum())) {
                        @Override
                        public ScheduledExecutors.Signal doCall() {
                            if (stopped) {
                                log.info("Stopping merge-n-push overseer thread");
                                return ScheduledExecutors.Signal.STOP;
                            }

                            log.info("Starting merge and push.");

                            long minTimestamp = segmentGranularity
                                    .truncate(rejectionPolicy.getCurrMaxTime().minus(windowMillis)).getMillis();

                            List<Map.Entry<Long, Sink>> sinksToPush = Lists.newArrayList();
                            for (Map.Entry<Long, Sink> entry : sinks.entrySet()) {
                                final Long intervalStart = entry.getKey();
                                if (intervalStart < minTimestamp) {
                                    log.info("Adding entry[%s] for merge and push.", entry);
                                    sinksToPush.add(entry);
                                }
                            }

                            for (final Map.Entry<Long, Sink> entry : sinksToPush) {
                                persistAndMerge(entry.getKey(), entry.getValue());
                            }

                            if (stopped) {
                                log.info("Stopping merge-n-push overseer thread");
                                return ScheduledExecutors.Signal.STOP;
                            } else {
                                return ScheduledExecutors.Signal.REPEAT;
                            }
                        }
                    });
        }
    };
}

From source file: com.twosigma.cook.jobclient.JobClient.java

private ScheduledExecutorService startListenService() {
    final ScheduledExecutorService scheduledExecutorService = Executors.newScheduledThreadPool(1,
            new ThreadFactoryBuilder().setUncaughtExceptionHandler(new UncaughtExceptionHandler() {
                @Override
                public void uncaughtException(Thread t, Throwable e) {
                    _log.error("Can not handle exception for listener service.", e);
                }
            }).build());

    scheduledExecutorService.scheduleAtFixedRate(new Runnable() {
        @Override
        public void run() {
            // Process Jobs and JobListeners first
            // Simply return if there is no listener.
            if (!_jobUUIDToListener.isEmpty()) {
                // Query active jobs
                ImmutableMap<UUID, Job> currentUUIDToJob;
                try {
                    currentUUIDToJob = queryJobs(_activeUUIDToJob.keySet());
                } catch (JobClientException e) {
                    // Catch and log
                    _log.warn("Failed to query job status for jobs " + _activeUUIDToJob.keySet(), e);
                    return;
                }

                // Invoke listeners and update maps for job
                for (Map.Entry<UUID, Job> entry : currentUUIDToJob.entrySet()) {
                    UUID juuid = entry.getKey();
                    Job currentJob = entry.getValue();
                    if (!_activeUUIDToJob.get(juuid).equals(currentJob)) {
                        // Firstly, invoke job listener if there is a listener associated to this job.
                        final JobListener listener = _jobUUIDToListener.get(juuid);
                        if (listener != null) {
                            // XXX It is completely debatable what should be the correct behavior here
                            // when a listener throws an exception. We have the following possible options:
                            // 1. simply propagate the exception;
                            // 2. keep {@code _activeUUIDToJob} being unchanged and retrying in the next cycle;
                            // 3. simply log the error but the listener will miss this status
                            // update (which is the current behavior).
                            try {
                                listener.onStatusUpdate(currentJob);
                            } catch (Exception e) {
                                _log.warn(
                                        "Failed to invoke listener onStatusUpdate() for " + currentJob
                                                + ". The listener service won't deliver this message again.",
                                        e);
                            }
                        }

                        // Secondly, update internal maps if necessary.
                        if (currentJob.getStatus() != Job.Status.COMPLETED) {
                            _activeUUIDToJob.put(juuid, currentJob);
                        } else {
                            _activeUUIDToJob.remove(juuid);
                            _jobUUIDToListener.remove(juuid);
                        }
                    }
                }
            }
            if (!_groupUUIDToListener.isEmpty()) {
                // Now process Groups and GroupListeners
                // Query active groups
                ImmutableMap<UUID, Group> currentUUIDToGroup;
                try {
                    currentUUIDToGroup = queryGroups(_activeUUIDToGroup.keySet());
                } catch (JobClientException e) {
                    // Catch and log
                    _log.warn("Failed to query group status for groups " + _activeUUIDToGroup.keySet(), e);
                    return;
                }
                // Invoke listeners and update maps for groups
                for (Map.Entry<UUID, Group> entry : currentUUIDToGroup.entrySet()) {
                    UUID guuid = entry.getKey();
                    Group currentGroup = entry.getValue();
                    if (!_activeUUIDToGroup.get(guuid).equals(currentGroup)) {
                        final GroupListener listener = _groupUUIDToListener.get(guuid);
                        if (listener != null) {
                            // Invoke group listeners
                            try {
                                listener.onStatusUpdate(currentGroup);
                            } catch (Exception e) {
                                _log.warn(
                                        "Failed to invoke listener onStatusUpdate() for " + currentGroup
                                                + ". The listener service won't deliver this message again.",
                                        e);
                            }
                        }

                        // Secondly, update internal maps if necessary.
                        if (currentGroup.getStatus() != Group.Status.COMPLETED) {
                            _activeUUIDToGroup.put(guuid, currentGroup);
                        } else {
                            _activeUUIDToGroup.remove(guuid);
                            _groupUUIDToListener.remove(guuid);
                        }
                    }
                }
            }
        }
    }, _statusUpdateInterval, _statusUpdateInterval, TimeUnit.SECONDS);
    return scheduledExecutorService;
}

From source file: io.druid.segment.realtime.plumber.RealtimePlumberSchool.java

@Override
public Plumber findPlumber(final Schema schema, final FireDepartmentMetrics metrics) {
    verifyState();

    final RejectionPolicy rejectionPolicy = rejectionPolicyFactory.create(windowPeriod);
    log.info("Creating plumber using rejectionPolicy[%s]", rejectionPolicy);

    return new Plumber() {
        private volatile boolean stopped = false;
        private volatile ExecutorService persistExecutor = null;
        private volatile ScheduledExecutorService scheduledExecutor = null;

        private final Map<Long, Sink> sinks = Maps.newConcurrentMap();
        private final VersionedIntervalTimeline<String, Sink> sinkTimeline = new VersionedIntervalTimeline<String, Sink>(
                String.CASE_INSENSITIVE_ORDER);

        @Override
        public void startJob() {
            computeBaseDir(schema).mkdirs();
            initializeExecutors();
            bootstrapSinksFromDisk();
            registerServerViewCallback();
            startPersistThread();
        }

        @Override
        public Sink getSink(long timestamp) {
            if (!rejectionPolicy.accept(timestamp)) {
                return null;
            }

            final long truncatedTime = segmentGranularity.truncate(timestamp);

            Sink retVal = sinks.get(truncatedTime);

            if (retVal == null) {
                final Interval sinkInterval = new Interval(new DateTime(truncatedTime),
                        segmentGranularity.increment(new DateTime(truncatedTime)));

                retVal = new Sink(sinkInterval, schema, versioningPolicy.getVersion(sinkInterval));

                try {
                    segmentAnnouncer.announceSegment(retVal.getSegment());
                    sinks.put(truncatedTime, retVal);
                    sinkTimeline.add(retVal.getInterval(), retVal.getVersion(),
                            new SingleElementPartitionChunk<Sink>(retVal));
                } catch (IOException e) {
                    log.makeAlert(e, "Failed to announce new segment[%s]", schema.getDataSource())
                            .addData("interval", retVal.getInterval()).emit();
                }
            }

            return retVal;
        }

        @Override
        public <T> QueryRunner<T> getQueryRunner(final Query<T> query) {
            final QueryRunnerFactory<T, Query<T>> factory = conglomerate.findFactory(query);
            final QueryToolChest<T, Query<T>> toolchest = factory.getToolchest();

            final Function<Query<T>, ServiceMetricEvent.Builder> builderFn = new Function<Query<T>, ServiceMetricEvent.Builder>() {

                @Override
                public ServiceMetricEvent.Builder apply(@Nullable Query<T> input) {
                    return toolchest.makeMetricBuilder(query);
                }
            };

            List<TimelineObjectHolder<String, Sink>> querySinks = Lists.newArrayList();
            for (Interval interval : query.getIntervals()) {
                querySinks.addAll(sinkTimeline.lookup(interval));
            }

            return toolchest.mergeResults(
                    factory.mergeRunners(queryExecutorService, FunctionalIterable.create(querySinks)
                            .transform(new Function<TimelineObjectHolder<String, Sink>, QueryRunner<T>>() {
                                @Override
                                public QueryRunner<T> apply(TimelineObjectHolder<String, Sink> holder) {
                                    final Sink theSink = holder.getObject().getChunk(0).getObject();
                                    return new SpecificSegmentQueryRunner<T>(new MetricsEmittingQueryRunner<T>(
                                            emitter, builderFn,
                                            factory.mergeRunners(MoreExecutors.sameThreadExecutor(),
                                                    Iterables.transform(theSink,
                                                            new Function<FireHydrant, QueryRunner<T>>() {
                                                                @Override
                                                                public QueryRunner<T> apply(FireHydrant input) {
                                                                    return factory
                                                                            .createRunner(input.getSegment());
                                                                }
                                                            }))),
                                            new SpecificSegmentSpec(new SegmentDescriptor(holder.getInterval(),
                                                    theSink.getSegment().getVersion(),
                                                    theSink.getSegment().getShardSpec().getPartitionNum())));
                                }
                            })));
        }

        @Override
        public void persist(final Runnable commitRunnable) {
            final List<Pair<FireHydrant, Interval>> indexesToPersist = Lists.newArrayList();
            for (Sink sink : sinks.values()) {
                if (sink.swappable()) {
                    indexesToPersist.add(Pair.of(sink.swap(), sink.getInterval()));
                }
            }

            log.info("Submitting persist runnable for dataSource[%s]", schema.getDataSource());

            persistExecutor.execute(new ThreadRenamingRunnable(
                    String.format("%s-incremental-persist", schema.getDataSource())) {
                @Override
                public void doRun() {
                    for (Pair<FireHydrant, Interval> pair : indexesToPersist) {
                        metrics.incrementRowOutputCount(persistHydrant(pair.lhs, schema, pair.rhs));
                    }
                    commitRunnable.run();
                }
            });
        }

        // Submits persist-n-merge task for a Sink to the persistExecutor
        private void persistAndMerge(final long truncatedTime, final Sink sink) {
            final String threadName = String.format("%s-%s-persist-n-merge", schema.getDataSource(),
                    new DateTime(truncatedTime));
            persistExecutor.execute(new ThreadRenamingRunnable(threadName) {
                @Override
                public void doRun() {
                    final Interval interval = sink.getInterval();

                    for (FireHydrant hydrant : sink) {
                        if (!hydrant.hasSwapped()) {
                            log.info("Hydrant[%s] hasn't swapped yet, swapping. Sink[%s]", hydrant, sink);
                            final int rowCount = persistHydrant(hydrant, schema, interval);
                            metrics.incrementRowOutputCount(rowCount);
                        }
                    }

                    final File mergedTarget = new File(computePersistDir(schema, interval), "merged");
                    if (mergedTarget.exists()) {
                        log.info("Skipping already-merged sink: %s", sink);
                        return;
                    }

                    File mergedFile = null;
                    try {
                        List<QueryableIndex> indexes = Lists.newArrayList();
                        for (FireHydrant fireHydrant : sink) {
                            Segment segment = fireHydrant.getSegment();
                            final QueryableIndex queryableIndex = segment.asQueryableIndex();
                            log.info("Adding hydrant[%s]", fireHydrant);
                            indexes.add(queryableIndex);
                        }

                        mergedFile = IndexMerger.mergeQueryableIndex(indexes, schema.getAggregators(),
                                mergedTarget);

                        QueryableIndex index = IndexIO.loadIndex(mergedFile);

                        DataSegment segment = dataSegmentPusher.push(mergedFile, sink.getSegment()
                                .withDimensions(Lists.newArrayList(index.getAvailableDimensions())));

                        segmentPublisher.publishSegment(segment);
                    } catch (IOException e) {
                        log.makeAlert(e, "Failed to persist merged index[%s]", schema.getDataSource())
                                .addData("interval", interval).emit();
                    }

                    if (mergedFile != null) {
                        try {
                            if (mergedFile != null) {
                                log.info("Deleting Index File[%s]", mergedFile);
                                FileUtils.deleteDirectory(mergedFile);
                            }
                        } catch (IOException e) {
                            log.warn(e, "Error deleting directory[%s]", mergedFile);
                        }
                    }
                }
            });
        }

        @Override
        public void finishJob() {
            log.info("Shutting down...");

            for (final Map.Entry<Long, Sink> entry : sinks.entrySet()) {
                persistAndMerge(entry.getKey(), entry.getValue());
            }

            while (!sinks.isEmpty()) {
                try {
                    log.info("Cannot shut down yet! Sinks remaining: %s", Joiner.on(", ")
                            .join(Iterables.transform(sinks.values(), new Function<Sink, String>() {
                                @Override
                                public String apply(Sink input) {
                                    return input.getSegment().getIdentifier();
                                }
                            })));

                    synchronized (handoffCondition) {
                        while (!sinks.isEmpty()) {
                            handoffCondition.wait();
                        }
                    }
                } catch (InterruptedException e) {
                    throw Throwables.propagate(e);
                }
            }

            // scheduledExecutor is shutdown here, but persistExecutor is shutdown when the
            // ServerView sends it a new segment callback
            if (scheduledExecutor != null) {
                scheduledExecutor.shutdown();
            }

            stopped = true;
        }

        private void initializeExecutors() {
            if (persistExecutor == null) {
                persistExecutor = Executors.newFixedThreadPool(1,
                        new ThreadFactoryBuilder().setDaemon(true).setNameFormat("plumber_persist_%d").build());
            }
            if (scheduledExecutor == null) {
                scheduledExecutor = Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder()
                        .setDaemon(true).setNameFormat("plumber_scheduled_%d").build());
            }
        }

        private void bootstrapSinksFromDisk() {
            File baseDir = computeBaseDir(schema);
            if (baseDir == null || !baseDir.exists()) {
                return;
            }

            File[] files = baseDir.listFiles();
            if (files == null) {
                return;
            }

            for (File sinkDir : files) {
                Interval sinkInterval = new Interval(sinkDir.getName().replace("_", "/"));

                //final File[] sinkFiles = sinkDir.listFiles();
                // To avoid reading and listing of "merged" dir
                final File[] sinkFiles = sinkDir.listFiles(new FilenameFilter() {
                    @Override
                    public boolean accept(File dir, String fileName) {
                        return !(Ints.tryParse(fileName) == null);
                    }
                });
                Arrays.sort(sinkFiles, new Comparator<File>() {
                    @Override
                    public int compare(File o1, File o2) {
                        try {
                            return Ints.compare(Integer.parseInt(o1.getName()), Integer.parseInt(o2.getName()));
                        } catch (NumberFormatException e) {
                            log.error(e, "Couldn't compare as numbers? [%s][%s]", o1, o2);
                            return o1.compareTo(o2);
                        }
                    }
                });

                try {
                    List<FireHydrant> hydrants = Lists.newArrayList();
                    for (File segmentDir : sinkFiles) {
                        log.info("Loading previously persisted segment at [%s]", segmentDir);

                        // This case was already filtered out when listing sinkFiles above.
                        // It is kept as a double-check so the "merged" dir is never added to the hydrants.
                        // If that filter is guaranteed to be sufficient, this check can be removed.
                        if (Ints.tryParse(segmentDir.getName()) == null) {
                            continue;
                        }

                        hydrants.add(
                                new FireHydrant(new QueryableIndexSegment(null, IndexIO.loadIndex(segmentDir)),
                                        Integer.parseInt(segmentDir.getName())));
                    }

                    Sink currSink = new Sink(sinkInterval, schema, versioningPolicy.getVersion(sinkInterval),
                            hydrants);
                    sinks.put(sinkInterval.getStartMillis(), currSink);
                    sinkTimeline.add(currSink.getInterval(), currSink.getVersion(),
                            new SingleElementPartitionChunk<Sink>(currSink));

                    segmentAnnouncer.announceSegment(currSink.getSegment());
                } catch (IOException e) {
                    log.makeAlert(e, "Problem loading sink[%s] from disk.", schema.getDataSource())
                            .addData("interval", sinkInterval).emit();
                }
            }
        }

        private void registerServerViewCallback() {
            serverView.registerSegmentCallback(persistExecutor, new ServerView.BaseSegmentCallback() {
                @Override
                public ServerView.CallbackAction segmentAdded(DruidServer server, DataSegment segment) {
                    if (stopped) {
                        log.info("Unregistering ServerViewCallback");
                        persistExecutor.shutdown();
                        return ServerView.CallbackAction.UNREGISTER;
                    }

                    if ("realtime".equals(server.getType())) {
                        return ServerView.CallbackAction.CONTINUE;
                    }

                    log.debug("Checking segment[%s] on server[%s]", segment, server);
                    if (schema.getDataSource().equals(segment.getDataSource())) {
                        final Interval interval = segment.getInterval();
                        for (Map.Entry<Long, Sink> entry : sinks.entrySet()) {
                            final Long sinkKey = entry.getKey();
                            if (interval.contains(sinkKey)) {
                                final Sink sink = entry.getValue();
                                log.info("Segment[%s] matches sink[%s] on server[%s]", segment, sink, server);

                                final String segmentVersion = segment.getVersion();
                                final String sinkVersion = sink.getSegment().getVersion();
                                if (segmentVersion.compareTo(sinkVersion) >= 0) {
                                    log.info("Segment version[%s] >= sink version[%s]", segmentVersion,
                                            sinkVersion);
                                    try {
                                        segmentAnnouncer.unannounceSegment(sink.getSegment());
                                        FileUtils
                                                .deleteDirectory(computePersistDir(schema, sink.getInterval()));
                                        log.info("Removing sinkKey %d for segment %s", sinkKey,
                                                sink.getSegment().getIdentifier());
                                        sinks.remove(sinkKey);
                                        sinkTimeline.remove(sink.getInterval(), sink.getVersion(),
                                                new SingleElementPartitionChunk<Sink>(sink));

                                        synchronized (handoffCondition) {
                                            handoffCondition.notifyAll();
                                        }
                                    } catch (IOException e) {
                                        log.makeAlert(e, "Unable to delete old segment for dataSource[%s].",
                                                schema.getDataSource()).addData("interval", sink.getInterval())
                                                .emit();
                                    }
                                }
                            }
                        }
                    }

                    return ServerView.CallbackAction.CONTINUE;
                }
            });
        }

        private void startPersistThread() {
            final long truncatedNow = segmentGranularity.truncate(new DateTime()).getMillis();
            final long windowMillis = windowPeriod.toStandardDuration().getMillis();

            log.info("Expect to run at [%s]", new DateTime().plus(new Duration(System.currentTimeMillis(),
                    segmentGranularity.increment(truncatedNow) + windowMillis)));

            ScheduledExecutors.scheduleAtFixedRate(scheduledExecutor,
                    new Duration(System.currentTimeMillis(),
                            segmentGranularity.increment(truncatedNow) + windowMillis),
                    new Duration(truncatedNow, segmentGranularity.increment(truncatedNow)),
                    new ThreadRenamingCallable<ScheduledExecutors.Signal>(String.format("%s-overseer-%d",
                            schema.getDataSource(), schema.getShardSpec().getPartitionNum())) {
                        @Override
                        public ScheduledExecutors.Signal doCall() {
                            if (stopped) {
                                log.info("Stopping merge-n-push overseer thread");
                                return ScheduledExecutors.Signal.STOP;
                            }

                            log.info("Starting merge and push.");

                            long minTimestamp = segmentGranularity
                                    .truncate(rejectionPolicy.getCurrMaxTime().minus(windowMillis)).getMillis();

                            List<Map.Entry<Long, Sink>> sinksToPush = Lists.newArrayList();
                            for (Map.Entry<Long, Sink> entry : sinks.entrySet()) {
                                final Long intervalStart = entry.getKey();
                                if (intervalStart < minTimestamp) {
                                    log.info("Adding entry[%s] for merge and push.", entry);
                                    sinksToPush.add(entry);
                                }
                            }

                            for (final Map.Entry<Long, Sink> entry : sinksToPush) {
                                persistAndMerge(entry.getKey(), entry.getValue());
                            }

                            if (stopped) {
                                log.info("Stopping merge-n-push overseer thread");
                                return ScheduledExecutors.Signal.STOP;
                            } else {
                                return ScheduledExecutors.Signal.REPEAT;
                            }
                        }
                    });
        }
    };
}

From source file: edu.umass.cs.gigapaxos.PaxosManager.java

/**
 * @param id
 *            My node ID.
 * @param unstringer
 *            An instance of Stringifiable that can convert String to
 *            NodeIDType.
 * @param niot
 *            InterfaceNIOTransport or InterfaceMessenger object used for
 *            messaging.
 * @param pi
 *            InterfaceReplicable application controlled by gigapaxos.
 *            Currently, all paxos instances must correspond to a single
 *            umbrella application even though each createPaxosInstance
 *            method explicitly specifies the app and this information is
 *            stored explicitly inside a paxos instance. The reason for the
 *            single umbrella app restriction is that we won't have a
 *            pointer to the appropriate app upon recovery otherwise.
 * @param paxosLogFolder
 *            Paxos logging folder.
 * @param enableNullCheckpoints
 *            Whether null checkpoints are enabled. We need this flag to be
 *            enabled if we intend to reconfigure paxos groups managed by
 *            this PaxosManager. Otherwise, we can not distinguish between a
 *            null checkpoint and no checkpoint, so the next epoch members
 *            may be waiting forever for the previous epoch's final state
 *            (that happens to be null). This flag needs to be set at
 *            construction time and can not be changed thereafter.
 */
public PaxosManager(NodeIDType id, Stringifiable<NodeIDType> unstringer,
        InterfaceNIOTransport<NodeIDType, JSONObject> niot, Replicable pi, String paxosLogFolder,
        boolean enableNullCheckpoints) {
    this.myID = this.integerMap.put(id);// id.hashCode();
    this.executor = Executors.newScheduledThreadPool(1, new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            Thread thread = Executors.defaultThreadFactory().newThread(r);
            thread.setName(PaxosManager.class.getSimpleName() + myID);
            return thread;
        }
    });
    this.unstringer = unstringer;
    this.largeCheckpointer = new LargeCheckpointer(paxosLogFolder, this.myID + "");
    this.myApp = LargeCheckpointer.wrap(pi, largeCheckpointer);
    this.FD = new FailureDetection<NodeIDType>(id, niot, paxosLogFolder);
    this.pinstances = new MultiArrayMap<String, PaxosInstanceStateMachine>(
            Config.getGlobalInt(PC.PINSTANCES_CAPACITY));
    this.corpses = new HashMap<String, PaxosInstanceStateMachine>();
    // this.activePaxii = new HashMap<String, ActivePaxosState>();
    this.messenger = (new PaxosMessenger<NodeIDType>(niot, this.integerMap));
    this.paxosLogger = new SQLPaxosLogger(this.myID, id.toString(), paxosLogFolder,
            this.wrapMessenger(this.messenger));
    this.nullCheckpointsEnabled = enableNullCheckpoints;
    // periodically remove active state for idle paxii
    executor.scheduleWithFixedDelay(new Deactivator(), 0, Config.getGlobalInt(PC.DEACTIVATION_PERIOD),
            TimeUnit.MILLISECONDS);
    this.pendingDigests = new PendingDigests(this.outstanding.requests,
            Config.getGlobalInt(PC.NUM_MESSAGE_DIGESTS), new PendingDigests.PendingDigestCallback() {
                public void callback(AcceptPacket accept) {
                    PaxosManager.this.callbackDigestedAcceptTimeout(accept);
                }
            });
    this.initOutstandingMonitor();
    (this.requestBatcher = new RequestBatcher(this)).start();
    (this.ppBatcher = new PaxosPacketBatcher(this)).start();
    testingInitialization();
    // needed to unclose when testing multiple runs of open and close
    open();
    // so paxos packets will come to me before anyone else
    niot.precedePacketDemultiplexer(
            Config.getGlobalString(PC.JSON_LIBRARY).equals("org.json") ? new JSONDemultiplexer()
                    : new FastDemultiplexer());
    initiateRecovery();
    if (!Config.getGlobalBoolean(PC.DELAY_PROFILER))
        DelayProfiler.disable();
}

From source file: org.epics.archiverappliance.config.DefaultConfigService.java

@Override
public void initialize(ServletContext sce) throws ConfigException {
    this.servletContext = sce;
    String contextPath = sce.getContextPath();
    logger.info("DefaultConfigService was created with a servlet context " + contextPath);

    try {
        String pathToVersionTxt = sce.getRealPath("ui/comm/version.txt");
        logger.debug("The full path to the version.txt is " + pathToVersionTxt);
        List<String> lines = Files.readAllLines(Paths.get(pathToVersionTxt), Charset.forName("UTF-8"));
        for (String line : lines) {
            configlogger.info(line);
        }
    } catch (Throwable t) {
        logger.fatal("Unable to determine appliance version", t);
    }

    try {
        // We first try Java system properties for this appliance's identity
        // If a property is not defined, then we check the environment.
        // This gives us the ability to cater to unit tests as well as running using buildAndDeploy scripts without touching the server.xml file.
        // Probably not the most standard way but suited to this need.
        // Finally, we use the local machine's hostname as the myidentity.
        myIdentity = System.getProperty(ARCHAPPL_MYIDENTITY);
        if (myIdentity == null) {
            myIdentity = System.getenv(ARCHAPPL_MYIDENTITY);
            if (myIdentity != null) {
                logger.info("Obtained my identity from environment variable " + myIdentity);
            } else {
                myIdentity = InetAddress.getLocalHost().getCanonicalHostName();
                logger.info("Using the local machine's hostname " + myIdentity + " as my identity");
            }
            if (myIdentity == null) {
                throw new ConfigException("Unable to determine identity of this appliance");
            }
        } else {
            logger.info("Obtained my identity from Java system properties " + myIdentity);
        }

        logger.info("My identity is " + myIdentity);
    } catch (Exception ex) {
        String msg = "Cannot determine this appliance's identity using either the environment variable "
                + ARCHAPPL_MYIDENTITY + " or the java system property " + ARCHAPPL_MYIDENTITY;
        configlogger.fatal(msg);
        throw new ConfigException(msg, ex);
    }
    // Appliances should be local and come straight from persistence.
    try {
        appliances = AppliancesList.loadAppliancesXML(servletContext);
    } catch (Exception ex) {
        throw new ConfigException("Exception loading appliances.xml", ex);
    }

    myApplianceInfo = appliances.get(myIdentity);
    if (myApplianceInfo == null)
        throw new ConfigException("Unable to determine applianceinfo using identity " + myIdentity);
    configlogger.info("My identity is " + myApplianceInfo.getIdentity() + " and my mgmt URL is "
            + myApplianceInfo.getMgmtURL());

    // To make sure we are not starting multiple appliance with the same identity, we make sure that the hostnames match
    try {
        String machineHostName = InetAddress.getLocalHost().getCanonicalHostName();
        String[] myAddrParts = myApplianceInfo.getClusterInetPort().split(":");
        String myHostNameFromInfo = myAddrParts[0];
        if (myHostNameFromInfo.equals("localhost")) {
            logger.debug(
                    "Using localhost for the cluster inet port. If you are indeed running a cluster, the cluster members will not join the cluster.");
        } else if (myHostNameFromInfo.equals(machineHostName)) {
            logger.debug(
                    "Hostname from config and hostname from InetAddress match exactly; we are correctly configured "
                            + machineHostName);
        } else if (InetAddressValidator.getInstance().isValid(myHostNameFromInfo)) {
            logger.debug("Using ipAddress for cluster config " + myHostNameFromInfo);
        } else {
            String msg = "The hostname from appliances.xml is " + myHostNameFromInfo
                    + " and from a call to InetAddress.getLocalHost().getCanonicalHostName() (typically FQDN) is "
                    + machineHostName
                    + ". These are not identical. They are probably equivalent but to prevent multiple appliances binding to the same identity we enforce this equality.";
            configlogger.fatal(msg);
            throw new ConfigException(msg);
        }
    } catch (UnknownHostException ex) {
        configlogger.error(
                "Got an UnknownHostException when trying to determine the hostname. This happens when DNS is not set correctly on this machine (for example, when using VMs). See the documentation for InetAddress.getLocalHost().getCanonicalHostName().");
    }

    try {
        String archApplPropertiesFileName = System.getProperty(ARCHAPPL_PROPERTIES_FILENAME);
        if (archApplPropertiesFileName == null) {
            archApplPropertiesFileName = System.getenv(ARCHAPPL_PROPERTIES_FILENAME);
        }
        if (archApplPropertiesFileName == null) {
            archApplPropertiesFileName = new URL(this.getClass().getClassLoader()
                    .getResource(DEFAULT_ARCHAPPL_PROPERTIES_FILENAME).toString()).getPath();
            configlogger.info(
                    "Loading archappl.properties from the webapp classpath " + archApplPropertiesFileName);
        } else {
            configlogger.info("Loading archappl.properties using the environment/JVM property from "
                    + archApplPropertiesFileName);
        }
        try (InputStream is = new FileInputStream(new File(archApplPropertiesFileName))) {
            archapplproperties.load(is);
            configlogger.info(
                    "Done loading installation specific properties file from " + archApplPropertiesFileName);
        } catch (Exception ex) {
            throw new ConfigException(
                    "Exception loading installation specific properties file " + archApplPropertiesFileName,
                    ex);
        }
    } catch (ConfigException cex) {
        throw cex;
    } catch (Exception ex) {
        configlogger.fatal("Exception loading the appliance properties file", ex);
    }

    switch (contextPath) {
    case "/mgmt":
        warFile = WAR_FILE.MGMT;
        this.mgmtRuntime = new MgmtRuntimeState(this);
        break;
    case "/engine":
        warFile = WAR_FILE.ENGINE;
        this.engineContext = new EngineContext(this);
        break;
    case "/retrieval":
        warFile = WAR_FILE.RETRIEVAL;
        this.retrievalState = new RetrievalState(this);
        break;
    case "/etl":
        this.etlPVLookup = new PBThreeTierETLPVLookup(this);
        warFile = WAR_FILE.ETL;
        break;
    default:
        logger.error("We seem to have introduced a new component into the system " + contextPath);
    }

    String pvName2KeyMappingClass = this.getInstallationProperties()
            .getProperty(ARCHAPPL_PVNAME_TO_KEY_MAPPING_CLASSNAME);
    if (pvName2KeyMappingClass == null || pvName2KeyMappingClass.equals("")
            || pvName2KeyMappingClass.length() < 1) {
        logger.info("Using the default key mapping class");
        pvName2KeyConverter = new ConvertPVNameToKey();
        pvName2KeyConverter.initialize(this);
    } else {
        try {
            logger.info("Using " + pvName2KeyMappingClass + " as the name to key mapping class");
            pvName2KeyConverter = (PVNameToKeyMapping) Class.forName(pvName2KeyMappingClass).newInstance();
            pvName2KeyConverter.initialize(this);
        } catch (Exception ex) {
            logger.fatal("Cannot initialize pv name to key mapping class " + pvName2KeyMappingClass, ex);
            throw new ConfigException(
                    "Cannot initialize pv name to key mapping class " + pvName2KeyMappingClass, ex);
        }
    }

    String runtimeFieldsListStr = this.getInstallationProperties()
            .getProperty("org.epics.archiverappliance.config.RuntimeKeys");
    if (runtimeFieldsListStr != null && !runtimeFieldsListStr.isEmpty()) {
        logger.debug("Got runtime fields from the properties file " + runtimeFieldsListStr);
        String[] runTimeFieldsArr = runtimeFieldsListStr.split(",");
        for (String rf : runTimeFieldsArr) {
            this.runTimeFields.add(rf.trim());
        }
    }

    startupExecutor = Executors.newScheduledThreadPool(1, new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            Thread t = new Thread(r);
            t.setName("Startup executor");
            return t;
        }
    });

    this.addShutdownHook(new Runnable() {
        @Override
        public void run() {
            logger.info("Shutting down startup scheduled executor...");
            startupExecutor.shutdown();
        }
    });

    this.startupState = STARTUP_SEQUENCE.READY_TO_JOIN_APPLIANCE;
    if (this.warFile == WAR_FILE.MGMT) {
        logger.info("Scheduling webappReady's for the mgmt webapp ");
        MgmtPostStartup mgmtPostStartup = new MgmtPostStartup(this);
        ScheduledFuture<?> postStartupFuture = startupExecutor.scheduleAtFixedRate(mgmtPostStartup, 10, 20,
                TimeUnit.SECONDS);
        mgmtPostStartup.setCancellingFuture(postStartupFuture);
    } else {
        logger.info("Scheduling webappReady's for the non-mgmt webapp " + this.warFile.toString());
        NonMgmtPostStartup nonMgmtPostStartup = new NonMgmtPostStartup(this, this.warFile.toString());
        ScheduledFuture<?> postStartupFuture = startupExecutor.scheduleAtFixedRate(nonMgmtPostStartup, 10, 20,
                TimeUnit.SECONDS);
        nonMgmtPostStartup.setCancellingFuture(postStartupFuture);
    }

    // Measure some JMX metrics once a minute
    startupExecutor.scheduleAtFixedRate(new Runnable() {
        @Override
        public void run() {
            processMetrics.takeMeasurement();
        }
    }, 60, 60, TimeUnit.SECONDS);
}

From source file: com.metamx.druid.realtime.RealtimePlumberSchool.java

private void initializeExecutors() {
    if (persistExecutor == null) {
        persistExecutor = Executors.newFixedThreadPool(1,
                new ThreadFactoryBuilder().setDaemon(true).setNameFormat("plumber_persist_%d").build());
    }
    if (scheduledExecutor == null) {
        scheduledExecutor = Executors.newScheduledThreadPool(1,
                new ThreadFactoryBuilder().setDaemon(true).setNameFormat("plumber_scheduled_%d").build());
    }
}

From source file: com.cloud.baremetal.BareMetalVmManagerImpl.java

@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
    _name = name;

    ComponentLocator locator = ComponentLocator.getCurrentLocator();
    _configDao = locator.getDao(ConfigurationDao.class);
    if (_configDao == null) {
        throw new ConfigurationException("Unable to get the configuration dao.");
    }

    Map<String, String> configs = _configDao.getConfiguration("AgentManager", params);

    _instance = configs.get("instance.name");
    if (_instance == null) {
        _instance = "DEFAULT";
    }

    String workers = configs.get("expunge.workers");
    int wrks = NumbersUtil.parseInt(workers, 10);

    String time = configs.get("expunge.interval");
    _expungeInterval = NumbersUtil.parseInt(time, 86400);

    time = configs.get("expunge.delay");
    _expungeDelay = NumbersUtil.parseInt(time, _expungeInterval);

    _executor = Executors.newScheduledThreadPool(wrks, new NamedThreadFactory("UserVm-Scavenger"));

    _itMgr.registerGuru(Type.UserBareMetal, this);
    VirtualMachine.State.getStateMachine().registerListener(this);

    s_logger.info("User VM Manager is configured.");

    return true;
}

From source file: org.apache.flume.sink.hdfs.HDFSEventSink.java

@Override
public void start() {
    String timeoutName = "hdfs-" + getName() + "-call-runner-%d";
    callTimeoutPool = Executors.newFixedThreadPool(threadsPoolSize,
            new ThreadFactoryBuilder().setNameFormat(timeoutName).build());

    String rollerName = "hdfs-" + getName() + "-roll-timer-%d";
    timedRollerPool = Executors.newScheduledThreadPool(rollTimerPoolSize,
            new ThreadFactoryBuilder().setNameFormat(rollerName).build());

    this.sfWriters = new WriterLinkedHashMap(maxOpenFiles);
    sinkCounter.start();
    super.start();
}