Example usage for org.joda.time DateTime now

Introduction

On this page you can find example usages of org.joda.time DateTime.now().

Prototype

public static DateTime now() 

Document

Obtains a DateTime set to the current system millisecond time using ISOChronology in the default time zone.
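
As a quick orientation before the usage list, here is a minimal, self-contained sketch of DateTime.now() alongside its zone-aware overload DateTime.now(DateTimeZone); the printed values are illustrative only.

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;

public final class NowExample {
    public static void main(final String[] args) {
        // Current instant using ISOChronology in the default time zone
        final DateTime local = DateTime.now();
        // The overload pins the time zone explicitly
        final DateTime utc = DateTime.now(DateTimeZone.UTC);
        System.out.println(local); // e.g. 2015-06-01T12:34:56.789-07:00
        System.out.println(utc);   // e.g. 2015-06-01T19:34:56.789Z
    }
}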

Usage

From source file: com.arpnetworking.metrics.generator.util.RealTimeExecutor.java

License: Apache License

/**
 * Generates metrics.
 */
public void execute() {
    for (final UnitOfWorkSchedule generator : _generators) {
        final long unitStart = generator.getScheduler()
                .next(TimeUnit.NANOSECONDS.convert(DateTime.now().getMillis(), TimeUnit.MILLISECONDS));
        _workEntries.add(new WorkEntry(generator, unitStart));
    }
    while (true) {
        if (_workEntries.isEmpty()) {
            break;
        }
        final WorkEntry entry = _workEntries.peek();
        final DateTime executeTime = new DateTime(
                TimeUnit.MILLISECONDS.convert(entry.getCurrentValue(), TimeUnit.NANOSECONDS));
        if (executeTime.isAfterNow()) {
            try {
                Thread.sleep(10);
            } catch (final InterruptedException ignored) {
                Thread.interrupted();
                return;
            }
            continue;
        }
        _workEntries.poll();
        _modifyingSink.setTime(
                new DateTime(TimeUnit.MILLISECONDS.convert(entry.getCurrentValue(), TimeUnit.NANOSECONDS)));
        entry.getSchedule().getGenerator().generate(_metricsFactory);
        final WorkEntry newEntry = new WorkEntry(entry.getSchedule(),
                entry.getSchedule().getScheduler().next(entry.getCurrentValue()));
        _workEntries.add(newEntry);
    }
}
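
A detail worth noting above is the round trip between Joda-Time millisecond instants and the nanosecond ticks used by the scheduler, via java.util.concurrent.TimeUnit. A minimal sketch of that conversion, with names of our own choosing:

import java.util.concurrent.TimeUnit;

import org.joda.time.DateTime;

public final class NanoClockExample {
    public static void main(final String[] args) {
        // Current instant as nanoseconds since the epoch...
        final long nowNanos = TimeUnit.NANOSECONDS.convert(DateTime.now().getMillis(), TimeUnit.MILLISECONDS);
        // ...and back to a DateTime; precision below one millisecond is lost
        final DateTime roundTripped = new DateTime(TimeUnit.MILLISECONDS.convert(nowNanos, TimeUnit.NANOSECONDS));
        System.out.println(roundTripped);
    }
}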

From source file: com.arpnetworking.metrics.mad.PeriodWorker.java

License: Apache License

/**
 * {@inheritDoc}
 */
@Override
public void run() {
    Thread.currentThread()
            .setUncaughtExceptionHandler((thread, throwable) -> LOGGER.error().setMessage("Unhandled exception")
                    .addData("periodWorker", PeriodWorker.this).setThrowable(throwable).log());

    while (_isRunning) {
        try {
            DateTime now = DateTime.now();
            final DateTime rotateAt = getRotateAt(now);
            Duration timeToRotate = new Duration(now, rotateAt);
            while (_isRunning && timeToRotate.isLongerThan(Duration.ZERO)) {
                // Process records or sleep
                Record recordToProcess = _recordQueue.poll();
                if (recordToProcess != null) {
                    while (recordToProcess != null) {
                        process(recordToProcess);
                        recordToProcess = _recordQueue.poll();
                    }
                } else {
                    Thread.sleep(Math.min(timeToRotate.getMillis(), 100));
                }
                // Recompute time to close
                now = DateTime.now();
                timeToRotate = new Duration(now, rotateAt);
            }
            // Drain the record queue before rotating
            final List<Record> recordsToProcess = Lists.newArrayList();
            _recordQueue.drainTo(recordsToProcess);
            for (final Record recordToProcess : recordsToProcess) {
                process(recordToProcess);
            }
            // Rotate
            rotate(now);
        } catch (final InterruptedException e) {
            Thread.interrupted();
            LOGGER.warn().setMessage("Interrupted waiting to close buckets").setThrowable(e).log();
            // CHECKSTYLE.OFF: IllegalCatch - Top level catch to prevent thread death
        } catch (final Exception e) {
            // CHECKSTYLE.ON: IllegalCatch
            LOGGER.error().setMessage("Aggregator failure").addData("periodWorker", this).setThrowable(e).log();
        }
    }
}
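
The wait loop above recomputes the remaining Duration on every pass and caps each sleep at 100 milliseconds, so the worker stays responsive to queued records and shutdown. A stripped-down sketch of that deadline loop, under names of our own:

import org.joda.time.DateTime;
import org.joda.time.Duration;

public final class DeadlineWait {
    public static void main(final String[] args) throws InterruptedException {
        final DateTime deadline = DateTime.now().plusSeconds(1);
        Duration remaining = new Duration(DateTime.now(), deadline);
        while (remaining.isLongerThan(Duration.ZERO)) {
            // Sleep in short slices so the loop can react to other work promptly
            Thread.sleep(Math.min(remaining.getMillis(), 100));
            remaining = new Duration(DateTime.now(), deadline);
        }
        System.out.println("deadline reached");
    }
}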

From source file: com.arpnetworking.metrics.mad.PeriodWorker.java

License: Apache License

void process(final Record record) {
    // Find an existing bucket for the record
    final Duration timeout = getPeriodTimeout(_period);
    final DateTime start = getStartTime(record.getTime(), _period);
    final DateTime expiration = max(DateTime.now().plus(timeout), start.plus(_period).plus(timeout));
    Bucket bucket = _bucketsByStart.get(start);

    // Create a new bucket if one does not exist
    if (bucket == null) {
        // Pre-emptively add the record to the _new_ bucket. This avoids
        // the race condition after indexing by expiration between adding
        // the record and closing the bucket.
        final Bucket newBucket = _bucketBuilder.setStart(start).build();
        newBucket.add(record);

        // Resolve bucket creation race condition; either:
        // 1) We won and can proceed to index the new bucket
        // 2) We lost and can proceed to add data to the existing bucket
        bucket = _bucketsByStart.putIfAbsent(start, newBucket);
        if (bucket == null) {
            LOGGER.debug().setMessage("Created new bucket").addData("bucket", newBucket)
                    .addData("expiration", expiration).addData("trigger", record.getId()).log();

            // Index the bucket by its expiration date; the expiration date is always in the future
            _bucketsByExpiration.compute(expiration, (dateTime, buckets) -> {
                if (buckets == null) {
                    buckets = Lists.newArrayList();
                }
                buckets.add(newBucket);
                return buckets;
            });

            // New bucket created and indexed with record
            return;
        }
    }

    // Add the record to the _existing_ bucket
    bucket.add(record);
}
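
The expiration computation above calls a max helper that is not part of the excerpt. A plausible sketch, assuming it simply returns the later of two instants:

import org.joda.time.DateTime;

final class DateTimes {
    // Hypothetical helper matching the max(...) call above: the later instant wins
    static DateTime max(final DateTime first, final DateTime second) {
        return first.isAfter(second) ? first : second;
    }
}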

From source file: com.arpnetworking.metrics.proxy.models.protocol.v2.LogMessagesProcessor.java

License: Apache License

private DateTime extractTimestamp(final String line) {
    // TODO(vkoskela): Implement different timestamp extract strategies [MAI-409]
    return DateTime.now();
}

From source file: com.arpnetworking.tsdcore.sinks.AggregationServerSink.java

License: Apache License

private void heartbeat() {
    final Messages.HeartbeatRecord message = Messages.HeartbeatRecord.newBuilder()
            .setTimestamp(DateTime.now().toString()).build();
    sendRawData(AggregationMessage.create(message).serialize());
    LOGGER.debug().setMessage("Heartbeat sent to aggregation server").addData("sink", getName()).log();
}
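
The heartbeat timestamp relies on DateTime.toString(), which emits an ISO-8601 string; assuming the receiver also uses Joda-Time, DateTime.parse can recover the instant. A minimal sketch:

import org.joda.time.DateTime;

public final class HeartbeatTimestamp {
    public static void main(final String[] args) {
        final String timestamp = DateTime.now().toString(); // e.g. 2015-06-01T12:34:56.789-07:00
        final DateTime parsed = DateTime.parse(timestamp);  // round-trips the instant and offset
        System.out.println(parsed);
    }
}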

From source file: com.arpnetworking.tsdcore.sinks.LimitingSink.java

License: Apache License

/**
 * {@inheritDoc}
 */
@Override
public void recordAggregateData(final Collection<AggregatedData> data, final Collection<Condition> conditions) {
    final DateTime now = DateTime.now();
    final List<AggregatedData> filteredData = Lists.newArrayListWithExpectedSize(data.size());
    // Copy into a mutable map; Maps.uniqueIndex returns an immutable map and
    // entries are removed below when data is limited
    final Map<FQDSN, Condition> conditionsByFQDSN = Maps.newHashMap(Maps.uniqueIndex(conditions,
            new Function<Condition, FQDSN>() {
                @Override
                public FQDSN apply(final Condition condition) {
                    return condition.getFQDSN();
                }
            }));
    long limited = 0;
    for (final AggregatedData datum : data) {
        if (_metricsLimiter.offer(datum, now)) {
            filteredData.add(datum);
        } else {
            LOGGER.warn(String.format("%s: Skipping publication of limited data; aggregatedData=%s", getName(),
                    datum));
            ++limited;

            // Remove any condition for the FQDSN
            // NOTE: Although limiting also considers the period, the data
            // produced in any one invocation of the sink is for a single
            // period, so we can safely ignore it and find any matching
            // conditions by FQDSN alone.
            conditionsByFQDSN.remove(datum.getFQDSN());
        }
    }
    _limited.getAndAdd(limited);
    _sink.recordAggregateData(filteredData, conditionsByFQDSN.values());
}

From source file: com.arpnetworking.tsdcore.sinks.VertxSink.java

License: Apache License

private void consumeLoop() {
    long flushedBytes = 0;
    try {
        boolean done = false;
        NetSocket socket = _socket.get();
        if (!_pendingData.isEmpty()) {
            LOGGER.debug().setMessage("Pending data").addData("sink", getName())
                    .addData("size", _pendingData.size()).log();
        }
        while (socket != null && !done) {
            if (_pendingData.size() > 0 && flushedBytes < MAX_FLUSH_BYTES) {
                final Buffer buffer = _pendingData.poll();
                flushedBytes += flushBuffer(buffer, socket);
            } else {
                done = true;
            }
            socket = _socket.get();
        }
        if (socket == null && (_lastNotConnectedNotify == null
                || _lastNotConnectedNotify.plus(Duration.standardSeconds(30)).isBeforeNow())) {
            LOGGER.debug().setMessage("Not connected to server. Data will be flushed when reconnected. "
                    + "Suppressing this message for 30 seconds.").addData("sink", getName()).log();
            _lastNotConnectedNotify = DateTime.now();
        }
        // CHECKSTYLE.OFF: IllegalCatch - Vertx might not log
    } catch (final Exception e) {
        // CHECKSTYLE.ON: IllegalCatch
        LOGGER.error().setMessage("Error in consume loop").addData("sink", getName()).setThrowable(e).log();
        throw e;
    } finally {
        if (flushedBytes > 0) {
            dispatch(event -> consumeLoop());
        } else {
            getVertx().setTimer(NO_DATA_CONSUME_LOOP_INTERVAL, event -> consumeLoop());
        }
    }
}
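
The not-connected warning above is throttled with the last.plus(duration).isBeforeNow() idiom. A condensed sketch of that pattern as a reusable class, with field and method names of our own:

import org.joda.time.DateTime;
import org.joda.time.Duration;

final class LogThrottle {
    private DateTime _lastNotify;

    // Returns true at most once per interval; callers skip logging otherwise
    boolean shouldNotify(final Duration interval) {
        if (_lastNotify == null || _lastNotify.plus(interval).isBeforeNow()) {
            _lastNotify = DateTime.now();
            return true;
        }
        return false;
    }
}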

From source file: com.arpnetworking.tsdcore.tailer.FilePositionStore.java

License: Apache License

/**
 * {@inheritDoc}
 */
@Override
public void setPosition(final String identifier, final long position) {
    final Descriptor descriptor = _state.putIfAbsent(identifier,
            new Descriptor.Builder().setPosition(Long.valueOf(position)).build());

    final DateTime now = DateTime.now();
    boolean requiresFlush = now.minus(_flushInterval).isAfter(_lastFlush);
    if (descriptor != null) {
        descriptor.update(position, now);
        requiresFlush = requiresFlush || descriptor.getDelta() > _flushThreshold;
    }
    if (requiresFlush) {
        flush();
    }
}
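
Note the elapsed-interval test here, now.minus(_flushInterval).isAfter(_lastFlush); it is equivalent to the _lastFlush.plus(_flushInterval).isBeforeNow() form used elsewhere on this page. A tiny sketch demonstrating the equivalence, with variable names of our own:

import org.joda.time.DateTime;
import org.joda.time.Duration;

public final class ElapsedCheck {
    public static void main(final String[] args) {
        final DateTime lastFlush = DateTime.now().minusSeconds(45);
        final Duration flushInterval = Duration.standardSeconds(30);
        final DateTime now = DateTime.now();
        // Both expressions ask: has flushInterval elapsed since lastFlush?
        System.out.println(now.minus(flushInterval).isAfter(lastFlush)); // true here
        System.out.println(lastFlush.plus(flushInterval).isBefore(now)); // true here
    }
}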

From source file: com.arpnetworking.tsdcore.tailer.FilePositionStore.java

License: Apache License

private void flush() {
    // Age out old state
    final DateTime now = DateTime.now();
    final DateTime oldest = now.minus(_retention);
    final long sizeBefore = _state.size();
    // NOTE: Maps.filterEntries only returns a filtered view and does not
    // modify the underlying map; remove stale entries directly instead.
    _state.entrySet().removeIf(entry -> !oldest.isBefore(entry.getValue().getLastUpdated()));
    final long sizeAfter = _state.size();
    if (sizeBefore != sizeAfter) {
        LOGGER.debug(String.format("Removed old entries from file position store; sizeBefore=%d, sizeAfter=%d",
                Long.valueOf(sizeBefore), Long.valueOf(sizeAfter)));
    }

    // Persist the state to disk
    try {
        OBJECT_MAPPER.writeValue(_file, _state);

        LOGGER.debug(String.format("Persisted file position state to disk; size=%d, file=%s",
                Long.valueOf(_state.size()), _file));
    } catch (final IOException ioe) {
        Throwables.propagate(ioe);
    } finally {
        _lastFlush = now;
    }
}

From source file: com.arpnetworking.utility.RateLimitLogger.java

License: Apache License

/**
 * Triggers logging of the message.
 */
public void log() {
    if (_lastWrite.isPresent()) {
        final DateTime last = _lastWrite.get();
        if (last.plus(_period).isBeforeNow()) {
            if (_suppressed == 0) {
                _loggingAdapter.log(_level.asInt(), _message);
            } else {
                _loggingAdapter.log(_level.asInt(), String.format("%s (suppressed=%d, lastLogged=%s)", _message,
                        _suppressed, new PrettyTime().format(_lastWrite.get().toDate())));
            }
            _lastWrite = Optional.of(DateTime.now());
        } else {
            _suppressed++;
        }
    } else {
        _loggingAdapter.log(_level.asInt(), _message);
        _lastWrite = Optional.of(DateTime.now());
    }
}