Example usage for org.joda.time DateTime plus

List of usage examples for org.joda.time DateTime plus

Introduction

This page collects usage examples for org.joda.time DateTime plus, drawn from open-source projects.

Prototype

public DateTime plus(ReadablePeriod period)
public DateTime plus(ReadableDuration duration)
public DateTime plus(long duration)

Source Link

Document

Returns a copy of this datetime with the specified period added. The long and ReadableDuration overloads add an exact number of milliseconds, while the ReadablePeriod overload adds field by field, so its result can shift across time-zone and daylight-saving transitions. The examples below exercise all three forms.
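
A minimal, self-contained sketch of the three overloads (the literal dates and amounts are arbitrary):

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Duration;
import org.joda.time.Period;

public class PlusDemo {
    public static void main(String[] args) {
        DateTime base = new DateTime(2024, 1, 15, 0, 0, 0, 0, DateTimeZone.UTC);

        // plus(ReadablePeriod): field-based addition
        DateTime twoDaysLater = base.plus(Period.days(2));               // 2024-01-17T00:00:00.000Z

        // plus(ReadableDuration): exact-millisecond addition
        DateTime threeHoursLater = base.plus(Duration.standardHours(3)); // 2024-01-15T03:00:00.000Z

        // plus(long): raw milliseconds
        DateTime oneMilliLater = base.plus(1L);                          // 2024-01-15T00:00:00.001Z

        System.out.println(twoDaysLater);
        System.out.println(threeHoursLater);
        System.out.println(oneMilliLater);
    }
}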

Usage

From source file:org.apache.abdera2.common.date.DateTimes.java

License:Apache License

public static Range<DateTime> afterBetweenOrAt(DateTime low, Duration duration) {
    return afterBetweenOrAt(low, low.plus(duration));
}

From source file:org.apache.abdera2.common.date.DateTimes.java

License:Apache License

public static Range<DateTime> exactlyAfter(DateTime date, Duration duration) {
    return exactly(date.plus(duration));
}

From source file:org.apache.abdera2.common.date.DateTimes.java

License:Apache License

public static Selector<DateTime> selectorForAtOrBetween(DateTime low, Duration duration) {
    return selectorForRange(atOrBetween(low, low.plus(duration)));
}

From source file:org.apache.druid.indexing.seekablestream.supervisor.SeekableStreamSupervisor.java

License:Apache License

private void checkTaskDuration() throws ExecutionException, InterruptedException, TimeoutException {
    final List<ListenableFuture<Map<PartitionIdType, SequenceOffsetType>>> futures = new ArrayList<>();
    final List<Integer> futureGroupIds = new ArrayList<>();

    for (Entry<Integer, TaskGroup> entry : activelyReadingTaskGroups.entrySet()) {
        Integer groupId = entry.getKey();
        TaskGroup group = entry.getValue();

        // find the longest running task from this group
        DateTime earliestTaskStart = DateTimes.nowUtc();
        for (TaskData taskData : group.tasks.values()) {
            if (taskData.startTime != null && earliestTaskStart.isAfter(taskData.startTime)) {
                earliestTaskStart = taskData.startTime;
            }
        }

        // if the longest-running task in this group has exceeded the configured duration, signal all tasks in the group to persist
        if (earliestTaskStart.plus(ioConfig.getTaskDuration()).isBeforeNow()) {
            log.info("Task group [%d] has run for [%s]", groupId, ioConfig.getTaskDuration());
            futureGroupIds.add(groupId);
            futures.add(checkpointTaskGroup(group, true));
        }
    }

    List<Map<PartitionIdType, SequenceOffsetType>> results = Futures.successfulAsList(futures)
            .get(futureTimeoutInSeconds, TimeUnit.SECONDS);
    for (int j = 0; j < results.size(); j++) {
        Integer groupId = futureGroupIds.get(j);
        TaskGroup group = activelyReadingTaskGroups.get(groupId);
        Map<PartitionIdType, SequenceOffsetType> endOffsets = results.get(j);

        if (endOffsets != null) {
            // set a timeout and put this group in pendingCompletionTaskGroups so that it can be monitored for completion
            group.completionTimeout = DateTimes.nowUtc().plus(ioConfig.getCompletionTimeout());
            pendingCompletionTaskGroups.computeIfAbsent(groupId, k -> new CopyOnWriteArrayList<>()).add(group);

            // set endOffsets as the next startOffsets
            for (Entry<PartitionIdType, SequenceOffsetType> entry : endOffsets.entrySet()) {
                partitionGroups.get(groupId).put(entry.getKey(), entry.getValue());
            }
        } else {
            for (String id : group.taskIds()) {
                killTask(id, "All tasks in group [%s] failed to transition to publishing state", groupId);
            }
            // clear partitionGroups so that the latest sequences from the db are used as start sequences,
            // not stale ones left over from any successful incremental handoffs
            partitionGroups.get(groupId).replaceAll((partition, sequence) -> getNotSetMarker());
        }

        // remove this task group from the list of current task groups now that it has been handled
        activelyReadingTaskGroups.remove(groupId);
    }
}
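
The plus call at the heart of this method is a standard deadline idiom: add a configured duration to a start instant and ask whether the result is already in the past. A stripped-down sketch of just that check (the two-hour start offset and one-hour limit are made-up stand-ins for Druid's ioConfig values):

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Period;

public class DeadlineCheck {
    public static void main(String[] args) {
        DateTime taskStart = DateTime.now(DateTimeZone.UTC).minusHours(2);
        Period taskDuration = Period.hours(1);

        // Same test as in checkTaskDuration: has the task outlived its configured duration?
        if (taskStart.plus(taskDuration).isBeforeNow()) {
            System.out.println("task has exceeded its duration; signal a checkpoint");
        }
    }
}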

From source file:org.apache.druid.java.util.common.granularity.DurationGranularity.java

License:Apache License

@Override
public DateTime increment(DateTime time) {
    return time.plus(getDuration());
}

From source file:org.apache.druid.java.util.common.granularity.NoneGranularity.java

License:Apache License

@Override
public DateTime increment(DateTime time) {
    return time.plus(1);
}
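
The two granularities differ only in the overload they choose: DurationGranularity steps by a fixed ReadableDuration, while NoneGranularity steps by plus(1), a single millisecond, the smallest increment Joda-Time can represent. A small sketch contrasting the two steps:

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Duration;

public class IncrementDemo {
    public static void main(String[] args) {
        DateTime t = new DateTime(2024, 1, 1, 0, 0, 0, 0, DateTimeZone.UTC);

        // DurationGranularity-style step: a fixed 15-minute duration
        DateTime next = t.plus(Duration.standardMinutes(15)); // 2024-01-01T00:15:00.000Z

        // NoneGranularity-style step: one millisecond, the smallest possible bucket
        DateTime nextMilli = t.plus(1); // 2024-01-01T00:00:00.001Z

        System.out.println(next);
        System.out.println(nextMilli);
    }
}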

From source file:org.apache.druid.query.groupby.GroupByQuery.java

License:Apache License

/**
 * Computes the timestamp that will be returned by {@link #getUniversalTimestamp()}.
 */
@Nullable
private DateTime computeUniversalTimestamp() {
    final String timestampStringFromContext = getContextValue(CTX_KEY_FUDGE_TIMESTAMP, "");
    final Granularity granularity = getGranularity();

    if (!timestampStringFromContext.isEmpty()) {
        return DateTimes.utc(Long.parseLong(timestampStringFromContext));
    } else if (Granularities.ALL.equals(granularity)) {
        final DateTime timeStart = getIntervals().get(0).getStart();
        return granularity.getIterable(new Interval(timeStart, timeStart.plus(1))).iterator().next().getStart();
    } else {
        return null;
    }
}

From source file:org.apache.druid.query.groupby.strategy.GroupByStrategyV2.java

License:Apache License

/**
 * If "query" has a single universal timestamp, return it. Otherwise return null. This is useful
 * for keeping timestamps in sync across partial queries that may have different intervals.
 *
 * @param query the query
 *
 * @return universal timestamp, or null
 */
public static DateTime getUniversalTimestamp(final GroupByQuery query) {
    final Granularity gran = query.getGranularity();
    final String timestampStringFromContext = query.getContextValue(CTX_KEY_FUDGE_TIMESTAMP, "");

    if (!timestampStringFromContext.isEmpty()) {
        return DateTimes.utc(Long.parseLong(timestampStringFromContext));
    } else if (Granularities.ALL.equals(gran)) {
        final DateTime timeStart = query.getIntervals().get(0).getStart();
        return gran.getIterable(new Interval(timeStart, timeStart.plus(1))).iterator().next().getStart();
    } else {
        return null;
    }
}
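
In both variants, timeStart.plus(1) widens the start instant into a one-millisecond Interval, the narrowest non-empty interval Joda-Time allows, so that granularity.getIterable yields exactly one bucket whose start is then returned. The trick in isolation:

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Interval;

public class OneMillisInterval {
    public static void main(String[] args) {
        DateTime timeStart = new DateTime(2024, 1, 1, 0, 0, 0, 0, DateTimeZone.UTC);
        // plus(1) adds one millisecond, producing the narrowest non-empty
        // Interval that still contains timeStart.
        Interval probe = new Interval(timeStart, timeStart.plus(1));
        System.out.println(probe.toDurationMillis()); // 1
    }
}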

From source file:org.apache.hadoop.dynamodb.DynamoDBFibonacciRetryer.java

License:Open Source License

public <T> RetryResult<T> runWithRetry(Callable<T> callable, Reporter reporter, PrintCounter retryCounter) {
    fib1 = 0;
    fib2 = 1;
    retryCount = 0;
    DateTime currentTime = new DateTime(DateTimeZone.UTC);
    DateTime retryEndTime = currentTime.plus(retryPeriod);

    while (true) {
        if (isShutdown) {
            log.info("Is shut down, giving up and returning null");
            return null;
        }

        try {
            T returnObj = callable.call();
            return new RetryResult<>(returnObj, retryCount);
        } catch (Exception e) {
            handleException(retryEndTime, e, reporter, retryCounter);
        }
    }
}
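
Here plus computes a wall-clock deadline once, up front, and handleException presumably compares each failure against it. A hypothetical simplification of that shape, with a fixed sleep standing in for the Fibonacci backoff the class is named for:

import java.util.concurrent.Callable;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Duration;

public class RetryUntilDeadline {
    // Simplified sketch of runWithRetry: retry until a wall-clock deadline passes.
    static <T> T runWithRetry(Callable<T> callable, Duration retryPeriod) throws Exception {
        DateTime retryEndTime = new DateTime(DateTimeZone.UTC).plus(retryPeriod);
        while (true) {
            try {
                return callable.call();
            } catch (Exception e) {
                if (new DateTime(DateTimeZone.UTC).isAfter(retryEndTime)) {
                    throw e; // deadline exceeded; give up
                }
                Thread.sleep(100); // fixed backoff stands in for the Fibonacci delay
            }
        }
    }

    public static void main(String[] args) throws Exception {
        System.out.println(runWithRetry(() -> "done", Duration.standardSeconds(5)));
    }
}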

From source file:org.apereo.portal.events.aggr.PortalEventDimensionPopulatorImpl.java

License:Apache License

final void doPopulateDateDimensions() {
    final DateTime now = getNow();

    final AggregationIntervalInfo startIntervalInfo;
    final DateTime oldestPortalEventTimestamp = this.portalEventDao.getOldestPortalEventTimestamp();
    if (oldestPortalEventTimestamp == null || now.isBefore(oldestPortalEventTimestamp)) {
        startIntervalInfo = this.intervalHelper.getIntervalInfo(AggregationInterval.YEAR,
                now.minus(this.dimensionBuffer));
    } else {
        startIntervalInfo = this.intervalHelper.getIntervalInfo(AggregationInterval.YEAR,
                oldestPortalEventTimestamp.minus(this.dimensionBuffer));
    }

    final AggregationIntervalInfo endIntervalInfo;
    final DateTime newestPortalEventTimestamp = this.portalEventDao.getNewestPortalEventTimestamp();
    if (newestPortalEventTimestamp == null || now.isAfter(newestPortalEventTimestamp)) {
        endIntervalInfo = this.intervalHelper.getIntervalInfo(AggregationInterval.YEAR,
                now.plus(this.dimensionBuffer));
    } else {
        endIntervalInfo = this.intervalHelper.getIntervalInfo(AggregationInterval.YEAR,
                newestPortalEventTimestamp.plus(this.dimensionBuffer));
    }

    final DateMidnight start = startIntervalInfo.getStart().toDateMidnight();
    final DateMidnight end = endIntervalInfo.getEnd().toDateMidnight();

    doPopulateDateDimensions(start, end);
}
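
The method pads both ends of the observed event window with a configurable buffer, minus on the start and plus on the end, before resolving the YEAR interval boundaries. A sketch of that symmetric padding (the timestamps and 30-day buffer are hypothetical):

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Period;

public class BufferedWindow {
    public static void main(String[] args) {
        // Hypothetical stand-ins for the oldest/newest portal event timestamps.
        DateTime oldest = new DateTime(2023, 6, 1, 0, 0, 0, 0, DateTimeZone.UTC);
        DateTime newest = new DateTime(2024, 6, 1, 0, 0, 0, 0, DateTimeZone.UTC);
        Period dimensionBuffer = Period.days(30);

        // Pad both ends of the event window, as doPopulateDateDimensions does
        // before resolving interval boundaries.
        DateTime start = oldest.minus(dimensionBuffer);
        DateTime end = newest.plus(dimensionBuffer);
        System.out.println(start + " .. " + end);
    }
}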