Example usage for org.joda.time Interval getStart

List of usage examples for org.joda.time Interval getStart

Introduction

On this page you can find example usages of org.joda.time.Interval.getStart().

Prototype

public DateTime getStart() 

Document

Gets the start of this time interval, which is inclusive, as a DateTime.
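
For orientation, here is a minimal standalone sketch of the call itself (the class name and instant values are illustrative, not taken from the sources below):

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Interval;

public class IntervalGetStartExample {
    public static void main(String[] args) {
        // An example one-day interval in UTC
        DateTime start = new DateTime(2019, 1, 1, 0, 0, DateTimeZone.UTC);
        Interval interval = new Interval(start, start.plusDays(1));

        // getStart() returns the inclusive start of the interval as a DateTime
        System.out.println(interval.getStart());               // 2019-01-01T00:00:00.000Z
        System.out.println(interval.getStart().equals(start)); // true
    }
}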

Usage

From source file: org.apache.druid.indexing.overlord.TaskLockbox.java

License: Apache License

/**
 * Release lock held for a task on a particular interval. Does nothing if the task does not currently
 * hold the mentioned lock.
 *
 * @param task task to unlock
 * @param interval interval to unlock
 */
public void unlock(final Task task, final Interval interval, @Nullable Integer partitionId) {
    giant.lock();

    try {
        final String dataSource = task.getDataSource();
        final NavigableMap<DateTime, SortedMap<Interval, List<TaskLockPosse>>> dsRunning = running
                .get(task.getDataSource());

        if (dsRunning == null || dsRunning.isEmpty()) {
            return;
        }

        final SortedMap<Interval, List<TaskLockPosse>> intervalToPosses = dsRunning.get(interval.getStart());

        if (intervalToPosses == null || intervalToPosses.isEmpty()) {
            return;
        }

        final List<TaskLockPosse> possesHolder = intervalToPosses.get(interval);
        if (possesHolder == null || possesHolder.isEmpty()) {
            return;
        }

        final List<TaskLockPosse> posses = possesHolder.stream().filter(posse -> posse.containsTask(task))
                .collect(Collectors.toList());

        for (TaskLockPosse taskLockPosse : posses) {
            final TaskLock taskLock = taskLockPosse.getTaskLock();

            final boolean match = (partitionId == null
                    && taskLock.getGranularity() == LockGranularity.TIME_CHUNK)
                    || (partitionId != null && taskLock.getGranularity() == LockGranularity.SEGMENT
                            && ((SegmentLock) taskLock).getPartitionId() == partitionId);

            if (match) {
                // Remove task from live list
                log.info("Removing task[%s] from TaskLock[%s]", task.getId(), taskLock);
                final boolean removed = taskLockPosse.removeTask(task);

                if (taskLockPosse.isTasksEmpty()) {
                    log.info("TaskLock is now empty: %s", taskLock);
                    possesHolder.remove(taskLockPosse);
                }

                if (possesHolder.isEmpty()) {
                    intervalToPosses.remove(interval);
                }

                if (intervalToPosses.isEmpty()) {
                    dsRunning.remove(interval.getStart());
                }

                if (running.get(dataSource).size() == 0) {
                    running.remove(dataSource);
                }

                // Wake up blocking-lock waiters
                lockReleaseCondition.signalAll();

                // Remove lock from storage. If it cannot be removed, just ignore the failure.
                try {
                    taskStorage.removeLock(task.getId(), taskLock);
                } catch (Exception e) {
                    log.makeAlert(e, "Failed to clean up lock from storage").addData("task", task.getId())
                            .addData("dataSource", taskLock.getDataSource())
                            .addData("interval", taskLock.getInterval())
                            .addData("version", taskLock.getVersion()).emit();
                }

                if (!removed) {
                    log.makeAlert("Lock release without acquire").addData("task", task.getId())
                            .addData("interval", interval).emit();
                }
            }
        }
    } finally {
        giant.unlock();
    }
}

From source file: org.apache.druid.indexing.overlord.TaskLockbox.java

License: Apache License

/**
 * Return all locks that overlap some search interval.
 */
private List<TaskLockPosse> findLockPossesOverlapsInterval(final String dataSource, final Interval interval) {
    giant.lock();

    try {
        final NavigableMap<DateTime, SortedMap<Interval, List<TaskLockPosse>>> dsRunning = running
                .get(dataSource);
        if (dsRunning == null) {
            // No locks at all
            return Collections.emptyList();
        } else {
            // Tasks are indexed by locked interval, which are sorted by interval start. Intervals are non-overlapping, so:
            final NavigableSet<DateTime> dsLockbox = dsRunning.navigableKeySet();
            final Iterable<DateTime> searchStartTimes = Iterables.concat(
                    // Single interval that starts at or before ours
                    Collections.singletonList(dsLockbox.floor(interval.getStart())),

                    // All intervals that start somewhere between our start instant (exclusive) and end instant (exclusive)
                    dsLockbox.subSet(interval.getStart(), false, interval.getEnd(), false));

            return StreamSupport.stream(searchStartTimes.spliterator(), false)
                    .filter(java.util.Objects::nonNull).map(dsRunning::get).filter(java.util.Objects::nonNull)
                    .flatMap(sortedMap -> sortedMap.entrySet().stream())
                    .filter(entry -> entry.getKey().overlaps(interval))
                    .flatMap(entry -> entry.getValue().stream()).collect(Collectors.toList());
        }
    } finally {
        giant.unlock();
    }
}
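
The lookup above leans on java.util.NavigableSet: floor(interval.getStart()) fetches the single lock interval starting at or before the search start, and subSet(...) fetches the ones starting strictly inside it. A minimal standalone sketch of that pattern (illustrative values, not Druid code):

import java.util.NavigableSet;
import java.util.TreeSet;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Interval;

public class OverlapLookupSketch {
    public static void main(String[] args) {
        // Start instants of non-overlapping locked intervals: [00:00,01:00), [01:00,02:00), [03:00,04:00)
        DateTime t0 = new DateTime(2019, 1, 1, 0, 0, DateTimeZone.UTC);
        NavigableSet<DateTime> lockStarts = new TreeSet<>();
        lockStarts.add(t0);
        lockStarts.add(t0.plusHours(1));
        lockStarts.add(t0.plusHours(3));

        // Search interval [00:30, 03:00)
        Interval search = new Interval(t0.plusMinutes(30), t0.plusHours(3));

        // Only these start instants can belong to overlapping intervals
        System.out.println(lockStarts.floor(search.getStart()));
        // 2019-01-01T00:00:00.000Z   (starts at or before the search start)
        System.out.println(lockStarts.subSet(search.getStart(), false, search.getEnd(), false));
        // [2019-01-01T01:00:00.000Z] (starts strictly inside the search interval)
    }
}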

From source file: org.apache.druid.indexing.worker.IntermediaryDataManager.java

License: Apache License

private static String getPartitionDir(String supervisorTaskId, Interval interval, int partitionId) {
    return Paths.get(supervisorTaskId, interval.getStart().toString(), interval.getEnd().toString(),
            String.valueOf(partitionId)).toString();
}
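
For illustration (the task id and interval are made up), the directory name built above embeds the ISO-8601 start and end instants produced by Joda's default DateTime.toString():

import java.nio.file.Paths;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Interval;

public class PartitionDirSketch {
    public static void main(String[] args) {
        DateTime start = new DateTime(2019, 1, 1, 0, 0, DateTimeZone.UTC);
        Interval interval = new Interval(start, start.plusDays(1));

        // Same shape as getPartitionDir above: start and end instants become path segments
        System.out.println(Paths.get("supervisor-task-1", interval.getStart().toString(),
                interval.getEnd().toString(), String.valueOf(0)));
        // supervisor-task-1/2019-01-01T00:00:00.000Z/2019-01-02T00:00:00.000Z/0  (on a POSIX filesystem)
    }
}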

From source file: org.apache.druid.java.util.common.granularity.PeriodGranularity.java

License: Apache License

@Override
public boolean isAligned(Interval interval) {
    return bucket(interval.getStart()).equals(interval);
}
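
A pure Joda-Time analogue of this check, using a fixed day bucket in place of Druid's PeriodGranularity.bucket(...) (the helper below is hypothetical, for illustration only):

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Interval;

public class DayAlignmentSketch {
    // An interval is "day aligned" if it exactly equals the day bucket containing its start
    static boolean isDayAligned(Interval interval) {
        DateTime dayStart = interval.getStart().withTimeAtStartOfDay();
        Interval dayBucket = new Interval(dayStart, dayStart.plusDays(1));
        return dayBucket.equals(interval);
    }

    public static void main(String[] args) {
        DateTime midnight = new DateTime(2019, 1, 1, 0, 0, DateTimeZone.UTC);
        System.out.println(isDayAligned(new Interval(midnight, midnight.plusDays(1))));  // true
        System.out.println(isDayAligned(new Interval(midnight, midnight.plusHours(6)))); // false
    }
}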

From source file: org.apache.druid.java.util.common.JodaUtils.java

License: Apache License

public static ArrayList<Interval> condenseIntervals(Iterable<Interval> intervals) {
    ArrayList<Interval> retVal = new ArrayList<>();

    final SortedSet<Interval> sortedIntervals;

    if (intervals instanceof SortedSet) {
        sortedIntervals = (SortedSet<Interval>) intervals;
    } else {
        sortedIntervals = new TreeSet<>(Comparators.intervalsByStartThenEnd());
        for (Interval interval : intervals) {
            sortedIntervals.add(interval);
        }
    }

    if (sortedIntervals.isEmpty()) {
        return new ArrayList<>();
    }

    Iterator<Interval> intervalsIter = sortedIntervals.iterator();
    Interval currInterval = intervalsIter.next();
    while (intervalsIter.hasNext()) {
        Interval next = intervalsIter.next();

        if (currInterval.abuts(next)) {
            currInterval = new Interval(currInterval.getStart(), next.getEnd());
        } else if (currInterval.overlaps(next)) {
            DateTime nextEnd = next.getEnd();
            DateTime currEnd = currInterval.getEnd();
            currInterval = new Interval(currInterval.getStart(), nextEnd.isAfter(currEnd) ? nextEnd : currEnd);
        } else {
            retVal.add(currInterval);
            currInterval = next;
        }
    }
    retVal.add(currInterval);

    return retVal;
}
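
A small usage sketch (the wrapper class is hypothetical; it assumes the JodaUtils class above, org.apache.druid.java.util.common.JodaUtils, is on the classpath): abutting and overlapping intervals are merged, disjoint ones are kept apart.

import java.util.Arrays;
import org.apache.druid.java.util.common.JodaUtils;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Interval;

public class CondenseIntervalsExample {
    public static void main(String[] args) {
        DateTime t0 = new DateTime(2019, 1, 1, 0, 0, DateTimeZone.UTC);

        System.out.println(JodaUtils.condenseIntervals(Arrays.asList(
                new Interval(t0, t0.plusHours(1)),                  // [00:00, 01:00)
                new Interval(t0.plusHours(1), t0.plusHours(2)),     // abuts the first -> merged
                new Interval(t0.plusHours(3), t0.plusHours(4)))));  // disjoint -> kept separate
        // Two intervals remain: [00:00, 02:00) and [03:00, 04:00)
    }
}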

From source file: org.apache.druid.java.util.common.JodaUtils.java

License: Apache License

public static Interval umbrellaInterval(Iterable<Interval> intervals) {
    ArrayList<DateTime> startDates = new ArrayList<>();
    ArrayList<DateTime> endDates = new ArrayList<>();

    for (Interval interval : intervals) {
        startDates.add(interval.getStart());
        endDates.add(interval.getEnd());
    }

    DateTime minStart = minDateTime(startDates.toArray(new DateTime[0]));
    DateTime maxEnd = maxDateTime(endDates.toArray(new DateTime[0]));

    if (minStart == null || maxEnd == null) {
        throw new IllegalArgumentException("Empty list of intervals");
    }
    return new Interval(minStart, maxEnd);
}

From source file: org.apache.druid.metadata.IndexerSQLMetadataStorageCoordinator.java

License: Apache License

private List<SegmentIdWithShardSpec> getPendingSegmentsForIntervalWithHandle(final Handle handle,
        final String dataSource, final Interval interval) throws IOException {
    final List<SegmentIdWithShardSpec> identifiers = new ArrayList<>();

    final ResultIterator<byte[]> dbSegments = handle.createQuery(StringUtils.format(
            "SELECT payload FROM %1$s WHERE dataSource = :dataSource AND start <= :end and %2$send%2$s >= :start",
            dbTables.getPendingSegmentsTable(), connector.getQuoteString())).bind("dataSource", dataSource)
            .bind("start", interval.getStart().toString()).bind("end", interval.getEnd().toString())
            .map(ByteArrayMapper.FIRST).iterator();

    while (dbSegments.hasNext()) {
        final byte[] payload = dbSegments.next();
        final SegmentIdWithShardSpec identifier = jsonMapper.readValue(payload, SegmentIdWithShardSpec.class);

        if (interval.overlaps(identifier.getInterval())) {
            identifiers.add(identifier);
        }
    }

    dbSegments.close();

    return identifiers;
}
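
The query above compares the start and end columns as strings bound from getStart().toString() and getEnd().toString(). That works because Joda's default DateTime.toString() is fixed-width, zero-padded ISO-8601, so lexicographic order matches chronological order for instants stored in the same zone. A tiny sketch (illustrative values):

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;

public class IsoStringOrderingSketch {
    public static void main(String[] args) {
        DateTime a = new DateTime(2019, 1, 2, 0, 0, DateTimeZone.UTC);
        DateTime b = new DateTime(2019, 1, 10, 0, 0, DateTimeZone.UTC);

        // Zero padding keeps string order in sync with time order ("...-02..." < "...-10...")
        System.out.println(a.toString().compareTo(b.toString()) < 0); // true
        System.out.println(a.isBefore(b));                            // true
    }
}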

From source file: org.apache.druid.metadata.IndexerSQLMetadataStorageCoordinator.java

License: Apache License

private VersionedIntervalTimeline<String, DataSegment> getTimelineForIntervalsWithHandle(final Handle handle,
        final String dataSource, final List<Interval> intervals) {
    if (intervals == null || intervals.isEmpty()) {
        throw new IAE("null/empty intervals");
    }

    final StringBuilder sb = new StringBuilder();
    sb.append("SELECT payload FROM %s WHERE used = true AND dataSource = ? AND (");
    for (int i = 0; i < intervals.size(); i++) {
        sb.append(StringUtils.format("(start <= ? AND %1$send%1$s >= ?)", connector.getQuoteString()));
        if (i == intervals.size() - 1) {
            sb.append(")");
        } else {
            sb.append(" OR ");
        }
    }

    Query<Map<String, Object>> sql = handle
            .createQuery(StringUtils.format(sb.toString(), dbTables.getSegmentsTable())).bind(0, dataSource);

    for (int i = 0; i < intervals.size(); i++) {
        Interval interval = intervals.get(i);
        sql = sql.bind(2 * i + 1, interval.getEnd().toString()).bind(2 * i + 2, interval.getStart().toString());
    }

    try (final ResultIterator<byte[]> dbSegments = sql.map(ByteArrayMapper.FIRST).iterator()) {
        return VersionedIntervalTimeline.forSegments(Iterators.transform(dbSegments, payload -> {
            try {
                return jsonMapper.readValue(payload, DataSegment.class);
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        }));
    }
}

From source file: org.apache.druid.metadata.IndexerSQLMetadataStorageCoordinator.java

License: Apache License

@Nullable
private SegmentIdWithShardSpec allocatePendingSegment(final Handle handle, final String dataSource,
        final String sequenceName, final Interval interval, final ShardSpecFactory shardSpecFactory,
        final String maxVersion) throws IOException {
    final CheckExistingSegmentIdResult result = checkAndGetExistingSegmentId(
            handle.createQuery(StringUtils.format(
                    "SELECT payload FROM %s WHERE " + "dataSource = :dataSource AND "
                            + "sequence_name = :sequence_name AND " + "start = :start AND "
                            + "%2$send%2$s = :end",
                    dbTables.getPendingSegmentsTable(), connector.getQuoteString())),
            interval, sequenceName, null, Pair.of("dataSource", dataSource),
            Pair.of("sequence_name", sequenceName), Pair.of("start", interval.getStart().toString()),
            Pair.of("end", interval.getEnd().toString()));

    if (result.found) {
        // The found existing segment identifier can be null if its interval doesn't match with the given interval
        return result.segmentIdentifier;
    }

    final SegmentIdWithShardSpec newIdentifier = createNewSegment(handle, dataSource, interval,
            shardSpecFactory, maxVersion);
    if (newIdentifier == null) {
        return null;
    }

    // SELECT -> INSERT can fail due to races; callers must be prepared to retry.
    // Avoiding ON DUPLICATE KEY since it's not portable.
    // Avoiding try/catch since it may cause inadvertent transaction-splitting.

    // UNIQUE key for the row, ensuring we don't have more than one segment per sequence per interval.
    // Using a single column instead of (sequence_name, sequence_prev_id) as some MySQL storage engines
    // have difficulty with large unique keys (see https://github.com/apache/incubator-druid/issues/2319)
    final String sequenceNamePrevIdSha1 = BaseEncoding.base16()
            .encode(Hashing.sha1().newHasher().putBytes(StringUtils.toUtf8(sequenceName)).putByte((byte) 0xff)
                    .putLong(interval.getStartMillis()).putLong(interval.getEndMillis()).hash().asBytes());

    // always insert empty previous sequence id
    insertToMetastore(handle, newIdentifier, dataSource, interval, "", sequenceName, sequenceNamePrevIdSha1);

    log.info("Allocated pending segment [%s] for sequence[%s] in DB", newIdentifier, sequenceName);

    return newIdentifier;
}

From source file: org.apache.druid.metadata.IndexerSQLMetadataStorageCoordinator.java

License: Apache License

private void insertToMetastore(Handle handle, SegmentIdWithShardSpec newIdentifier, String dataSource,
        Interval interval, String previousSegmentId, String sequenceName, String sequenceNamePrevIdSha1)
        throws JsonProcessingException {
    handle.createStatement(StringUtils.format(
            "INSERT INTO %1$s (id, dataSource, created_date, start, %2$send%2$s, sequence_name, sequence_prev_id, sequence_name_prev_id_sha1, payload) "
                    + "VALUES (:id, :dataSource, :created_date, :start, :end, :sequence_name, :sequence_prev_id, :sequence_name_prev_id_sha1, :payload)",
            dbTables.getPendingSegmentsTable(), connector.getQuoteString()))
            .bind("id", newIdentifier.toString()).bind("dataSource", dataSource)
            .bind("created_date", DateTimes.nowUtc().toString()).bind("start", interval.getStart().toString())
            .bind("end", interval.getEnd().toString()).bind("sequence_name", sequenceName)
            .bind("sequence_prev_id", previousSegmentId)
            .bind("sequence_name_prev_id_sha1", sequenceNamePrevIdSha1)
            .bind("payload", jsonMapper.writeValueAsBytes(newIdentifier)).execute();
}