List of usage examples for org.joda.time.DateTime.now()
public static DateTime now()
Obtains a DateTime set to the current system millisecond time using ISOChronology in the default time zone.
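Before the collected examples, a minimal standalone sketch of the call; the class name NowExample is illustrative only:

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;

public class NowExample {
    public static void main(final String[] args) {
        // Current instant in the JVM's default time zone, ISO chronology
        final DateTime now = DateTime.now();
        // The DateTime.now(DateTimeZone) overload evaluates in an explicit zone
        final DateTime utcNow = DateTime.now(DateTimeZone.UTC);
        System.out.println(now + " / " + utcNow);
    }
}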
From source file: com.amazonaws.services.kinesis.log4j.helpers.AsyncPutCallStatsReporter.java
License: Open Source License
/**
 * This method is invoked when a log record is successfully sent to Kinesis.
 * Though this is not too useful for production use cases, it provides a good
 * debugging tool while tweaking parameters for the appender.
 */
@Override
public void onSuccess(PutRecordRequest request, PutRecordResult result) {
    successfulRequestCount++;
    if (logger.isDebugEnabled() && (successfulRequestCount + failedRequestCount) % 3000 == 0) {
        logger.debug("Appender (" + appenderName + ") made " + successfulRequestCount
                + " successful put requests out of total " + (successfulRequestCount + failedRequestCount)
                + " in " + PeriodFormat.getDefault().print(new Period(startTime, DateTime.now()))
                + " since start");
    }
}
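The debug line above formats the elapsed time with Joda-Time's PeriodFormat. A minimal sketch of just that step, assuming a start time captured 90 minutes earlier (ElapsedExample is a hypothetical name):

import org.joda.time.DateTime;
import org.joda.time.Period;
import org.joda.time.format.PeriodFormat;

public class ElapsedExample {
    public static void main(final String[] args) {
        final DateTime startTime = DateTime.now().minusMinutes(90);
        // Prints a word-based elapsed time, something like "1 hour and 30 minutes"
        System.out.println(PeriodFormat.getDefault().print(new Period(startTime, DateTime.now())));
    }
}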
From source file: com.anrisoftware.prefdialog.miscswing.logwindowdock.ErrorNode.java
License: Open Source License
@Inject
ErrorNode(@Assisted ErrorCategory category) {
    super(category);
    this.time = DateTime.now();
}
From source file: com.anrisoftware.prefdialog.miscswing.logwindowdock.InfoNode.java
License: Open Source License
@Inject
InfoNode(@Assisted InfoCategory category) {
    super(category);
    this.time = DateTime.now();
}
From source file: com.arpnetworking.clusteraggregator.aggregation.Aggregator.java
License: Apache License
private void processAggregationMessage(final AggregatedData data) {
    //First message sets the data we know about this actor
    if (!_initialized) {
        _period = data.getPeriod();
        _cluster = data.getFQDSN().getCluster();
        _metric = data.getFQDSN().getMetric();
        _service = data.getFQDSN().getService();
        _statistic = data.getFQDSN().getStatistic();
        _resultBuilder = new AggregatedData.Builder()
                .setFQDSN(new FQDSN.Builder().setCluster(_cluster).setMetric(_metric).setService(_service)
                        .setStatistic(_statistic).build())
                .setHost(_cluster + "-cluster").setPeriod(_period).setPopulationSize(1L)
                .setSamples(Collections.<Quantity>emptyList())
                .setStart(DateTime.now().hourOfDay().roundFloorCopy())
                .setValue(new Quantity(0, Optional.<Unit>absent()));
        _lifecycleTracker.tell(new AggregatorLifecycle.NotifyAggregatorStarted(_resultBuilder.build()),
                getSelf());
        _initialized = true;
        _log.debug(String.format("Initialized aggregator for %s %s %s %s %s", _cluster, _service, _metric,
                _statistic, _period));
    } else if (!(_period.equals(data.getPeriod())) && _cluster.equals(data.getFQDSN().getCluster())
            && _service.equals(data.getFQDSN().getService()) && _metric.equals(data.getFQDSN().getMetric())
            && _statistic.equals(data.getFQDSN().getStatistic())) {
        _log.error(String.format("Aggregator for %s %s %s %s %s received a message with %s %s %s %s %s",
                _cluster, _service, _metric, _statistic, _period, data.getFQDSN().getCluster(),
                data.getFQDSN().getService(), data.getFQDSN().getMetric(), data.getFQDSN().getStatistic(),
                data.getPeriod()));
    }
    //Find the time bucket to dump this in
    if (_aggBuckets.size() > 0 && _aggBuckets.getFirst().getPeriodStart().isAfter(data.getPeriodStart())) {
        //We got a bit of data that is too old for us to aggregate.
        _log.warning(String.format(
                "Received a work item that is too old to aggregate: work item period starts at %s, "
                        + "bucket period starts at %s",
                data.getPeriodStart(), _aggBuckets.getFirst().getPeriodStart()));
    } else {
        if (_aggBuckets.size() == 0 || _aggBuckets.getLast().getPeriodStart().isBefore(data.getPeriodStart())) {
            //We need to create a new bucket to hold this data.
            _log.debug("Creating new aggregation bucket for period starting at " + data.getPeriodStart());
            _aggBuckets.add(new AggregationBucket(data.getPeriodStart()));
        }
        final Iterator<AggregationBucket> bucketIterator = _aggBuckets.iterator();
        AggregationBucket currentBucket;
        AggregationBucket correctBucket = null;
        while (bucketIterator.hasNext()) {
            currentBucket = bucketIterator.next();
            if (currentBucket.getPeriodStart().equals(data.getPeriodStart())) {
                //We found the correct bucket
                correctBucket = currentBucket;
                break;
            }
        }
        if (correctBucket == null) {
            _log.error("No bucket found to aggregate into, bug in the bucket walk");
        } else {
            correctBucket.getAggregatedData().add(data);
        }
    }
}
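The setStart(DateTime.now().hourOfDay().roundFloorCopy()) call above rounds the current instant down to the top of the hour via a field property. A minimal sketch of that idiom in isolation (HourFloorExample is a hypothetical name):

import org.joda.time.DateTime;

public class HourFloorExample {
    public static void main(final String[] args) {
        // roundFloorCopy() floors every field below the selected one,
        // so 14:37:12.345 becomes 14:00:00.000 in the same zone.
        final DateTime topOfHour = DateTime.now().hourOfDay().roundFloorCopy();
        System.out.println(topOfHour);
    }
}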
From source file: com.arpnetworking.clusteraggregator.aggregation.StreamingAggregator.java
License: Apache License
private void processAggregationMessage(final Messages.StatisticSetRecord data) {
    final CombinedMetricData metricData = CombinedMetricData.Builder.fromStatisticSetRecord(data).build();
    //First message sets the data we know about this actor
    if (!_initialized) {
        _period = metricData.getPeriod();
        _cluster = metricData.getCluster();
        _metric = metricData.getMetricName();
        _service = metricData.getService();
        _resultBuilder = new AggregatedData.Builder().setHost(createHost()).setPeriod(_period)
                .setPopulationSize(1L).setSamples(Collections.<Quantity>emptyList())
                .setStart(DateTime.now().hourOfDay().roundFloorCopy())
                .setValue(new Quantity.Builder().setValue(0d).build());
        _initialized = true;
        LOGGER.debug().setMessage("Initialized aggregator").addContext("actor", self()).log();
    } else if (!(_period.equals(metricData.getPeriod()) && _cluster.equals(metricData.getCluster())
            && _service.equals(metricData.getService()) && _metric.equals(metricData.getMetricName()))) {
        LOGGER.error().setMessage("Received a work item for another aggregator").addData("workItem", data)
                .addContext("actor", self()).log();
    }
    //Find the time bucket to dump this in
    final DateTime periodStart = DateTime.parse(data.getPeriodStart());
    if (_aggBuckets.size() > 0 && _aggBuckets.getFirst().getPeriodStart().isAfter(periodStart)) {
        //We got a bit of data that is too old for us to aggregate.
        LOGGER.warn().setMessage("Received a work item that is too old to aggregate")
                .addData("bucketStart", _aggBuckets.getFirst().getPeriodStart()).addData("workItem", data)
                .addContext("actor", self()).log();
    } else {
        if (_aggBuckets.size() == 0 || _aggBuckets.getLast().getPeriodStart().isBefore(periodStart)) {
            //We need to create a new bucket to hold this data.
            LOGGER.debug().setMessage("Creating new aggregation bucket for period")
                    .addData("period", periodStart).addContext("actor", self()).log();
            _aggBuckets.add(new StreamingAggregationBucket(periodStart));
        }
        final Iterator<StreamingAggregationBucket> bucketIterator = _aggBuckets.iterator();
        StreamingAggregationBucket currentBucket;
        StreamingAggregationBucket correctBucket = null;
        while (bucketIterator.hasNext()) {
            currentBucket = bucketIterator.next();
            if (currentBucket.getPeriodStart().equals(periodStart)) {
                //We found the correct bucket
                correctBucket = currentBucket;
                break;
            }
        }
        if (correctBucket == null) {
            LOGGER.error().setMessage("No bucket found to aggregate into, bug in the bucket walk")
                    .addContext("actor", self()).log();
        } else {
            LOGGER.debug().setMessage("Updating bucket").addData("bucket", correctBucket)
                    .addData("data", metricData).addContext("actor", self()).log();
            correctBucket.update(metricData);
            LOGGER.debug().setMessage("Done updating bucket").addData("bucket", correctBucket)
                    .addContext("actor", self()).log();
        }
    }
}
From source file: com.arpnetworking.clusteraggregator.client.AggClientConnection.java
License: Apache License
private Optional<AggregatedData> getAggData(final Messages.LegacyAggRecord aggRecord) {
    try {
        long sampleCount = 1;
        if (aggRecord.hasRawSampleCount()) {
            sampleCount = aggRecord.getRawSampleCount();
        } else if (aggRecord.getStatisticSamplesCount() > 0) {
            sampleCount = aggRecord.getStatisticSamplesCount();
        }
        final Period period = Period.parse(aggRecord.getPeriod());
        DateTime periodStart;
        if (aggRecord.hasPeriodStart()) {
            periodStart = DateTime.parse(aggRecord.getPeriodStart());
        } else {
            periodStart = DateTime.now().withTime(DateTime.now().getHourOfDay(), 0, 0, 0);
            while (periodStart.plus(period).isBeforeNow()) {
                periodStart = periodStart.plus(period);
            }
        }
        final Optional<Statistic> statisticOptional = _statisticFactory
                .createStatistic(aggRecord.getStatistic());
        if (!statisticOptional.isPresent()) {
            _log.error(String.format("Unsupported statistic %s", aggRecord.getStatistic()));
            return Optional.absent();
        }
        return Optional.of(new AggregatedData.Builder().setHost(_hostName.get())
                .setFQDSN(new FQDSN.Builder().setCluster(_clusterName.get()).setService(aggRecord.getService())
                        .setMetric(aggRecord.getMetric()).setStatistic(statisticOptional.get()).build())
                .setPeriod(Period.parse(aggRecord.getPeriod())).setStart(periodStart)
                .setPopulationSize(sampleCount)
                .setSamples(sampleizeDoubles(aggRecord.getStatisticSamplesList(), Optional.<Unit>absent()))
                .setValue(new Quantity(aggRecord.getStatisticValue(), Optional.<Unit>absent())).build());
        // CHECKSTYLE.OFF: IllegalCatch - The legacy parsing can throw a variety of runtime exceptions
    } catch (final RuntimeException e) {
        // CHECKSTYLE.ON: IllegalCatch
        _log.error("Caught an error parsing legacy agg record", e);
        return Optional.absent();
    }
}
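When the record carries no period start, the fallback above anchors at the top of the current hour and steps forward one period at a time, stopping at the most recent boundary not in the future. The same walk in isolation, assuming a five-minute period (at 14:37 it lands on 14:35); PeriodStartExample is a hypothetical name:

import org.joda.time.DateTime;
import org.joda.time.Period;

public class PeriodStartExample {
    public static void main(final String[] args) {
        final Period period = Period.parse("PT5M"); // assumed period; real records supply their own
        final DateTime now = DateTime.now();
        DateTime periodStart = now.withTime(now.getHourOfDay(), 0, 0, 0);
        // Advance while the *next* boundary is still in the past,
        // leaving the latest boundary at or before now.
        while (periodStart.plus(period).isBeforeNow()) {
            periodStart = periodStart.plus(period);
        }
        System.out.println(periodStart);
    }
}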
From source file: com.arpnetworking.metrics.common.tailer.FilePositionStore.java
License: Apache License
/**
 * {@inheritDoc}
 */
@Override
public void setPosition(final String identifier, final long position) {
    final Descriptor descriptor = _state.putIfAbsent(identifier,
            new Descriptor.Builder().setPosition(position).build());
    final DateTime now = DateTime.now();
    boolean requiresFlush = now.minus(_flushInterval).isAfter(_lastFlush);
    if (descriptor != null) {
        descriptor.update(position, now);
        requiresFlush = requiresFlush || descriptor.getDelta() > _flushThreshold;
    }
    if (requiresFlush) {
        flush();
    }
}
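The requiresFlush test above reads as "more than _flushInterval has elapsed since _lastFlush". A minimal sketch of that comparison with concrete values (names and durations are illustrative):

import org.joda.time.DateTime;
import org.joda.time.Duration;

public class FlushCheckExample {
    public static void main(final String[] args) {
        final Duration flushInterval = Duration.standardSeconds(10);
        final DateTime lastFlush = DateTime.now().minusSeconds(30);
        // (now - 10s) is after (now - 30s), so a flush is due
        final boolean requiresFlush = DateTime.now().minus(flushInterval).isAfter(lastFlush);
        System.out.println(requiresFlush); // true
    }
}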
From source file: com.arpnetworking.metrics.common.tailer.FilePositionStore.java
License: Apache License
private void flush() {
    // Age out old state
    final DateTime now = DateTime.now();
    final DateTime oldest = now.minus(_retention);
    final long sizeBefore = _state.size();
    final Iterator<Map.Entry<String, Descriptor>> iterator = _state.entrySet().iterator();
    while (iterator.hasNext()) {
        final Map.Entry<String, Descriptor> entry = iterator.next();
        if (!oldest.isBefore(entry.getValue().getLastUpdated())) {
            // Remove old descriptors
            iterator.remove();
        } else {
            // Mark retained descriptors as flushed
            entry.getValue().flush();
        }
    }
    final long sizeAfter = _state.size();
    if (sizeBefore != sizeAfter) {
        LOGGER.debug().setMessage("Removed old entries from file position store")
                .addData("sizeBefore", sizeBefore).addData("sizeAfter", sizeAfter).log();
    }

    // Persist the state to disk
    try {
        final Path temporaryFile = Paths.get(_file.toAbsolutePath().toString() + ".tmp");
        OBJECT_MAPPER.writeValue(temporaryFile.toFile(), _state);
        Files.move(temporaryFile, _file, StandardCopyOption.REPLACE_EXISTING);
        LOGGER.debug().setMessage("Persisted file position state to disk").addData("size", _state.size())
                .addData("file", _file).log();
    } catch (final IOException ioe) {
        throw Throwables.propagate(ioe);
    } finally {
        _lastFlush = now;
    }
}
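flush() persists state by writing to a sibling .tmp file and then moving it over the target, so a failure mid-write cannot leave a truncated state file behind. A minimal sketch of that write-then-replace pattern, with an illustrative path and payload:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;

public class ReplaceOnWriteExample {
    public static void main(final String[] args) throws IOException {
        final Path target = Paths.get("positions.json"); // hypothetical target file
        final Path temporary = Paths.get(target.toAbsolutePath() + ".tmp");
        Files.write(temporary, "{}".getBytes()); // stand-in for the real JSON write
        // Only a fully written file ever replaces the previous state
        Files.move(temporary, target, StandardCopyOption.REPLACE_EXISTING);
    }
}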
From source file: com.arpnetworking.metrics.generator.Generator.java
License: Apache License
private void generateTestFiles(final RandomGenerator mersenneTwister) throws IOException {
    //TODO(barp): Set these parameters from command line args [ISSUE-1]
    final List<Integer> metricSamplesPerUOW = Lists.newArrayList(1, 5, 25);
    final List<Integer> uowPerInterval = Lists.newArrayList(10000, 50000, 250000);
    final List<Integer> metricNamesPerUOW = Lists.newArrayList(1, 10, 100);
    final String clusterName = "MyPerformanceTestedCluster";
    final String serviceName = "MyPerformanceTestedService";
    final DateTime start = DateTime.now().hourOfDay().roundFloorCopy();
    final DateTime stop = start.plusMinutes(10);
    for (final Integer uowCount : uowPerInterval) {
        for (final Integer namesCount : metricNamesPerUOW) {
            for (final Integer samplesCount : metricSamplesPerUOW) {
                final Path fileName = Paths
                        .get(String.format("logs/r_%08d_m_%03d_s_%03d", uowCount, namesCount, samplesCount));
                final TestFileGenerator testFileGenerator = new TestFileGenerator.Builder()
                        .setRandom(mersenneTwister).setUnitOfWorkCount(uowCount).setNamesCount(namesCount)
                        .setSamplesCount(samplesCount).setStartTime(start).setEndTime(stop)
                        .setFileName(fileName).setClusterName(clusterName).setServiceName(serviceName).build();
                testFileGenerator.generate();
            }
        }
    }
}
From source file: com.arpnetworking.metrics.generator.util.RealTimeExecutor.java
License: Apache License
/**
 * Public constructor.
 *
 * @param generators List of UOW generators.
 * @param outputPath File to write metrics to.
 * @param clusterName The cluster to generate metrics for.
 * @param serviceName The service to generate metrics for.
 */
public RealTimeExecutor(final List<UnitOfWorkSchedule> generators, final Path outputPath,
        final String clusterName, final String serviceName) {
    _generators = generators;
    _workEntries = new PriorityQueue<>(generators.size(), new WorkItemOrdering());
    _modifyingSink = new GeneratorSink(outputPath, DateTime.now());
    _metricsFactory = new TsdMetricsFactory.Builder().setClusterName(clusterName).setServiceName(serviceName)
            .setSinks(Collections.<Sink>singletonList(_modifyingSink)).build();
}