Example usage for org.joda.time Interval getStart

List of usage examples for org.joda.time Interval getStart

Introduction

On this page you can find example usage for org.joda.time Interval getStart.

Prototype

public DateTime getStart() 

Document

Gets the start of this time interval, which is inclusive, as a DateTime.
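
The following is a minimal, self-contained sketch, not taken from any of the source files listed under Usage, showing that getStart() returns the inclusive start instant of an Interval; the class name, dates, and time zone are illustrative only.

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Interval;

public class GetStartExample {
    public static void main(String[] args) {
        // Build an interval from two instants (illustrative values).
        DateTime start = new DateTime(2023, 1, 1, 0, 0, DateTimeZone.UTC);
        DateTime end = new DateTime(2023, 1, 2, 0, 0, DateTimeZone.UTC);
        Interval interval = new Interval(start, end);

        // getStart() returns the inclusive start instant as a DateTime.
        DateTime inclusiveStart = interval.getStart();
        System.out.println(inclusiveStart);            // 2023-01-01T00:00:00.000Z
        System.out.println(interval.getStartMillis()); // the same instant as epoch milliseconds
    }
}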

Usage

From source file:org.apache.druid.metadata.IndexerSQLMetadataStorageCoordinator.java

License:Apache License

@Override
public int deletePendingSegments(String dataSource, Interval deleteInterval) {
    return connector.getDBI().inTransaction((handle, status) -> handle.createStatement(StringUtils.format(
            "delete from %s where datasource = :dataSource and created_date >= :start and created_date < :end",
            dbTables.getPendingSegmentsTable())).bind("dataSource", dataSource)
            .bind("start", deleteInterval.getStart().toString()).bind("end", deleteInterval.getEnd().toString())
            .execute());
}

From source file:org.apache.druid.metadata.IndexerSQLMetadataStorageCoordinator.java

License:Apache License

@Override
public List<DataSegment> getUnusedSegmentsForInterval(final String dataSource, final Interval interval) {
    List<DataSegment> matchingSegments = connector
            .inReadOnlyTransaction(new TransactionCallback<List<DataSegment>>() {
                @Override
                public List<DataSegment> inTransaction(final Handle handle, final TransactionStatus status) {
                    // 2 range conditions are used on different columns, but not all SQL databases properly optimize it.
                    // Some databases can only use an index on one of the columns. An additional condition provides
                    // explicit knowledge that 'start' cannot be greater than 'end'.
                    return handle.createQuery(StringUtils.format(
                            "SELECT payload FROM %1$s WHERE dataSource = :dataSource and start >= :start "
                                    + "and start <= :end and %2$send%2$s <= :end and used = false",
                            dbTables.getSegmentsTable(), connector.getQuoteString()))
                            .setFetchSize(connector.getStreamingFetchSize()).bind("dataSource", dataSource)
                            .bind("start", interval.getStart().toString())
                            .bind("end", interval.getEnd().toString()).map(ByteArrayMapper.FIRST)
                            .fold(new ArrayList<>(), new Folder3<List<DataSegment>, byte[]>() {
                                @Override
                                public List<DataSegment> fold(List<DataSegment> accumulator, byte[] payload,
                                        FoldController foldController, StatementContext statementContext) {
                                    try {
                                        accumulator.add(jsonMapper.readValue(payload, DataSegment.class));
                                        return accumulator;
                                    } catch (Exception e) {
                                        throw new RuntimeException(e);
                                    }
                                }
                            });
                }
            });

    log.info("Found %,d segments for %s for interval %s.", matchingSegments.size(), dataSource, interval);
    return matchingSegments;
}

From source file:org.apache.druid.metadata.IndexerSQLMetadataStorageCoordinator.java

License:Apache License

@Override
public List<Pair<DataSegment, String>> getUsedSegmentAndCreatedDateForInterval(String dataSource,
        Interval interval) {
    return connector.retryWithHandle(handle -> handle
            .createQuery(StringUtils.format(
                    "SELECT created_date, payload FROM %1$s WHERE dataSource = :dataSource "
                            + "AND start >= :start AND %2$send%2$s <= :end AND used = true",
                    dbTables.getSegmentsTable(), connector.getQuoteString()))
            .bind("dataSource", dataSource).bind("start", interval.getStart().toString())
            .bind("end", interval.getEnd().toString()).map(new ResultSetMapper<Pair<DataSegment, String>>() {
                @Override
                public Pair<DataSegment, String> map(int index, ResultSet r, StatementContext ctx)
                        throws SQLException {
                    try {
                        return new Pair<>(jsonMapper.readValue(r.getBytes("payload"), DataSegment.class),
                                r.getString("created_date"));
                    } catch (IOException e) {
                        throw new RuntimeException(e);
                    }
                }
            }).list());
}

From source file:org.apache.druid.metadata.SQLMetadataSegmentManager.java

License:Apache License

/**
 * Implementation for both {@link #markAsUsedAllNonOvershadowedSegmentsInDataSource} (if the given interval is null)
 * and {@link #markAsUsedNonOvershadowedSegmentsInInterval}.
 */
private int doMarkAsUsedNonOvershadowedSegments(String dataSourceName, @Nullable Interval interval) {
    List<DataSegment> usedSegmentsOverlappingInterval = new ArrayList<>();
    List<DataSegment> unusedSegmentsInInterval = new ArrayList<>();
    connector.inReadOnlyTransaction((handle, status) -> {
        String queryString = StringUtils.format("SELECT used, payload FROM %1$s WHERE dataSource = :dataSource",
                getSegmentsTable());
        if (interval != null) {
            queryString += StringUtils.format(" AND start < :end AND %1$send%1$s > :start",
                    connector.getQuoteString());
        }
        Query<?> query = handle.createQuery(queryString).setFetchSize(connector.getStreamingFetchSize())
                .bind("dataSource", dataSourceName);
        if (interval != null) {
            query = query.bind("start", interval.getStart().toString()).bind("end",
                    interval.getEnd().toString());
        }
        query = query.map((int index, ResultSet resultSet, StatementContext context) -> {
            try {
                DataSegment segment = jsonMapper.readValue(resultSet.getBytes("payload"), DataSegment.class);
                if (resultSet.getBoolean("used")) {
                    usedSegmentsOverlappingInterval.add(segment);
                } else {
                    if (interval == null || interval.contains(segment.getInterval())) {
                        unusedSegmentsInInterval.add(segment);
                    }
                }
                return null;
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        });
        // Consume the query results to ensure usedSegmentsOverlappingInterval and unusedSegmentsInInterval are
        // populated.
        consume(query.iterator());
        return null;
    });

    VersionedIntervalTimeline<String, DataSegment> versionedIntervalTimeline = VersionedIntervalTimeline
            .forSegments(Iterators.concat(usedSegmentsOverlappingInterval.iterator(),
                    unusedSegmentsInInterval.iterator()));

    return markNonOvershadowedSegmentsAsUsed(unusedSegmentsInInterval, versionedIntervalTimeline);
}

From source file:org.apache.druid.metadata.SQLMetadataSegmentManager.java

License:Apache License

@Override
public int markAsUnusedSegmentsInInterval(String dataSourceName, Interval interval) {
    try {
        Integer numUpdatedDatabaseEntries = connector.getDBI()
                .withHandle(handle -> handle
                        .createStatement(StringUtils.format(
                                "UPDATE %s SET used=false WHERE datasource = :datasource "
                                        + "AND start >= :start AND %2$send%2$s <= :end",
                                getSegmentsTable(), connector.getQuoteString()))
                        .bind("datasource", dataSourceName).bind("start", interval.getStart().toString())
                        .bind("end", interval.getEnd().toString()).execute());
        return numUpdatedDatabaseEntries;
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}

From source file:org.apache.druid.query.IntervalChunkingQueryRunner.java

License:Apache License

private static Iterable<Interval> splitInterval(Interval interval, Period period) {
    if (interval.getEndMillis() == interval.getStartMillis()) {
        return Collections.singletonList(interval);
    }

    List<Interval> intervals = new ArrayList<>();
    Iterator<Interval> timestamps = new PeriodGranularity(period, null, null).getIterable(interval).iterator();

    DateTime start = DateTimes.max(timestamps.next().getStart(), interval.getStart());
    while (timestamps.hasNext()) {
        DateTime end = timestamps.next().getStart();
        intervals.add(new Interval(start, end));
        start = end;
    }

    if (start.compareTo(interval.getEnd()) < 0) {
        intervals.add(new Interval(start, interval.getEnd()));
    }

    return intervals;
}

From source file:org.apache.druid.query.select.SelectQueryQueryToolChest.java

License:Apache License

@Override
public <T extends LogicalSegment> List<T> filterSegments(SelectQuery query, List<T> segments) {
    // at the point where this code is called, only one datasource should exist.
    final String dataSource = Iterables.getOnlyElement(query.getDataSource().getNames());

    PagingSpec pagingSpec = query.getPagingSpec();
    Map<String, Integer> paging = pagingSpec.getPagingIdentifiers();
    if (paging == null || paging.isEmpty()) {
        return segments;
    }

    final Granularity granularity = query.getGranularity();

    TreeMap<Long, Long> granularThresholds = new TreeMap<>();

    // A paged select query using a UnionDataSource will return pagingIdentifiers from segments in more than one
    // dataSource which confuses subsequent queries and causes a failure. To avoid this, filter only the paging keys
    // that are applicable to this dataSource so that each dataSource in a union query gets the appropriate keys.
    paging.keySet().stream().filter(identifier -> SegmentId.tryParse(dataSource, identifier) != null)
            .map(SegmentId.makeIntervalExtractor(dataSource))
            .sorted(query.isDescending() ? Comparators.intervalsByEndThenStart()
                    : Comparators.intervalsByStartThenEnd())
            .forEach(interval -> {
                if (query.isDescending()) {
                    long granularEnd = granularity.bucketStart(interval.getEnd()).getMillis();
                    Long currentEnd = granularThresholds.get(granularEnd);
                    if (currentEnd == null || interval.getEndMillis() > currentEnd) {
                        granularThresholds.put(granularEnd, interval.getEndMillis());
                    }
                } else {
                    long granularStart = granularity.bucketStart(interval.getStart()).getMillis();
                    Long currentStart = granularThresholds.get(granularStart);
                    if (currentStart == null || interval.getStartMillis() < currentStart) {
                        granularThresholds.put(granularStart, interval.getStartMillis());
                    }
                }
            });

    List<T> queryIntervals = Lists.newArrayList(segments);

    Iterator<T> it = queryIntervals.iterator();
    if (query.isDescending()) {
        while (it.hasNext()) {
            Interval interval = it.next().getInterval();
            Map.Entry<Long, Long> ceiling = granularThresholds
                    .ceilingEntry(granularity.bucketStart(interval.getEnd()).getMillis());
            if (ceiling == null || interval.getStartMillis() >= ceiling.getValue()) {
                it.remove();
            }
        }
    } else {
        while (it.hasNext()) {
            Interval interval = it.next().getInterval();
            Map.Entry<Long, Long> floor = granularThresholds
                    .floorEntry(granularity.bucketStart(interval.getStart()).getMillis());
            if (floor == null || interval.getEndMillis() <= floor.getValue()) {
                it.remove();
            }
        }
    }
    return queryIntervals;
}

From source file:org.apache.druid.query.vector.VectorCursorGranularizer.java

License:Apache License

@Nullable
public static VectorCursorGranularizer create(final StorageAdapter storageAdapter, final VectorCursor cursor,
        final Granularity granularity, final Interval queryInterval) {
    final DateTime minTime = storageAdapter.getMinTime();
    final DateTime maxTime = storageAdapter.getMaxTime();

    final Interval storageAdapterInterval = new Interval(minTime, granularity.bucketEnd(maxTime));
    final Interval clippedQueryInterval = queryInterval.overlap(storageAdapterInterval);

    if (clippedQueryInterval == null) {
        return null;
    }

    final Iterable<Interval> bucketIterable = granularity.getIterable(clippedQueryInterval);
    final Interval firstBucket = granularity.bucket(clippedQueryInterval.getStart());

    final VectorValueSelector timeSelector;
    if (firstBucket.contains(clippedQueryInterval)) {
        // Only one bucket, no need to read the time column.
        assert Iterables.size(bucketIterable) == 1;
        timeSelector = null;
    } else {
        // Multiple buckets, need to read the time column to know when we move from one to the next.
        timeSelector = cursor.getColumnSelectorFactory().makeValueSelector(ColumnHolder.TIME_COLUMN_NAME);
    }

    return new VectorCursorGranularizer(cursor, bucketIterable, timeSelector);
}

From source file:org.apache.druid.segment.QueryableIndexCursorSequenceBuilder.java

License:Apache License

public Sequence<Cursor> build(final Granularity gran) {
    final Offset baseOffset;

    if (filterBitmap == null) {
        baseOffset = descending ? new SimpleDescendingOffset(index.getNumRows())
                : new SimpleAscendingOffset(index.getNumRows());
    } else {
        baseOffset = BitmapOffset.of(filterBitmap, descending, index.getNumRows());
    }

    // Column caches shared amongst all cursors in this sequence.
    final Map<String, BaseColumn> columnCache = new HashMap<>();

    final NumericColumn timestamps = (NumericColumn) index.getColumnHolder(ColumnHolder.TIME_COLUMN_NAME)
            .getColumn();

    final Closer closer = Closer.create();
    closer.register(timestamps);

    Iterable<Interval> iterable = gran.getIterable(interval);
    if (descending) {
        iterable = Lists.reverse(ImmutableList.copyOf(iterable));
    }

    return Sequences.withBaggage(Sequences.map(Sequences.simple(iterable), new Function<Interval, Cursor>() {
        @Override
        public Cursor apply(final Interval inputInterval) {
            final long timeStart = Math.max(interval.getStartMillis(), inputInterval.getStartMillis());
            final long timeEnd = Math.min(interval.getEndMillis(),
                    gran.increment(inputInterval.getStart()).getMillis());

            if (descending) {
                for (; baseOffset.withinBounds(); baseOffset.increment()) {
                    if (timestamps.getLongSingleValueRow(baseOffset.getOffset()) < timeEnd) {
                        break;
                    }
                }
            } else {
                for (; baseOffset.withinBounds(); baseOffset.increment()) {
                    if (timestamps.getLongSingleValueRow(baseOffset.getOffset()) >= timeStart) {
                        break;
                    }
                }
            }

            final Offset offset = descending
                    ? new DescendingTimestampCheckingOffset(baseOffset, timestamps, timeStart,
                            minDataTimestamp >= timeStart)
                    : new AscendingTimestampCheckingOffset(baseOffset, timestamps, timeEnd,
                            maxDataTimestamp < timeEnd);

            final Offset baseCursorOffset = offset.clone();
            final ColumnSelectorFactory columnSelectorFactory = new QueryableIndexColumnSelectorFactory(index,
                    virtualColumns, descending, closer, baseCursorOffset.getBaseReadableOffset(), columnCache);
            final DateTime myBucket = gran.toDateTime(inputInterval.getStartMillis());

            if (postFilter == null) {
                return new QueryableIndexCursor(baseCursorOffset, columnSelectorFactory, myBucket);
            } else {
                FilteredOffset filteredOffset = new FilteredOffset(baseCursorOffset, columnSelectorFactory,
                        descending, postFilter, bitmapIndexSelector);
                return new QueryableIndexCursor(filteredOffset, columnSelectorFactory, myBucket);
            }

        }
    }), closer);
}

From source file:org.apache.druid.server.audit.SQLAuditManager.java

License:Apache License

@Override
public List<AuditEntry> fetchAuditHistory(final String key, final String type, Interval interval) {
    final Interval theInterval = getIntervalOrDefault(interval);
    return dbi.withHandle(new HandleCallback<List<AuditEntry>>() {
        @Override
        public List<AuditEntry> withHandle(Handle handle) {
            return handle.createQuery(StringUtils.format(
                    "SELECT payload FROM %s WHERE audit_key = :audit_key and type = :type and created_date between :start_date and :end_date ORDER BY created_date",
                    getAuditTable())).bind("audit_key", key).bind("type", type)
                    .bind("start_date", theInterval.getStart().toString())
                    .bind("end_date", theInterval.getEnd().toString()).map(new ResultSetMapper<AuditEntry>() {
                        @Override
                        public AuditEntry map(int index, ResultSet r, StatementContext ctx)
                                throws SQLException {
                            try {
                                return jsonMapper.readValue(r.getBytes("payload"), AuditEntry.class);
                            } catch (IOException e) {
                                throw new SQLException(e);
                            }
                        }
                    }).list();
        }
    });
}