Example usage for org.joda.time Interval getStart

Introduction

On this page you can find example usages of org.joda.time Interval getStart.

Prototype

public DateTime getStart() 

Document

Gets the start of this time interval, which is inclusive, as a DateTime.
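For orientation, here is a minimal, self-contained sketch (not drawn from the projects below; the class and variable names are hypothetical) showing that getStart() returns the inclusive start instant of the interval as a DateTime:

import org.joda.time.DateTime;
import org.joda.time.Interval;

public class IntervalStartExample {
    public static void main(String[] args) {
        // Joda-Time intervals are half-open: the start is inclusive, the end exclusive.
        Interval interval = new Interval(new DateTime(2013, 1, 1, 0, 0), new DateTime(2013, 2, 1, 0, 0));
        DateTime start = interval.getStart();
        System.out.println(start); // prints the inclusive start, 2013-01-01T00:00:00.000 (zone-dependent offset)
    }
}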

Usage

From source file: com.metamx.druid.indexing.common.task.IndexGeneratorTask.java

License: Open Source License

@JsonCreator
public IndexGeneratorTask(@JsonProperty("id") String id, @JsonProperty("groupId") String groupId,
        @JsonProperty("interval") Interval interval, @JsonProperty("firehose") FirehoseFactory firehoseFactory,
        @JsonProperty("schema") Schema schema, @JsonProperty("rowFlushBoundary") int rowFlushBoundary) {
    super(id != null ? id
            : String.format("%s_generator_%s_%s_%s", groupId, interval.getStart(), interval.getEnd(),
                    schema.getShardSpec().getPartitionNum()),
            groupId, schema.getDataSource(), Preconditions.checkNotNull(interval, "interval"));

    this.firehoseFactory = firehoseFactory;
    this.schema = schema;
    this.rowFlushBoundary = rowFlushBoundary;
}

From source file: com.metamx.druid.indexing.common.task.IndexGeneratorTask.java

License: Open Source License

@Override
public TaskStatus run(final TaskToolbox toolbox) throws Exception {
    // We should have a lock from before we started running
    final TaskLock myLock = Iterables
            .getOnlyElement(toolbox.getTaskActionClient().submit(new LockListAction()));

    // We know this exists
    final Interval interval = getImplicitLockInterval().get();

    // Set up temporary directory for indexing
    final File tmpDir = new File(toolbox.getTaskWorkDir(),
            String.format("%s_%s_%s_%s_%s", this.getDataSource(), interval.getStart(), interval.getEnd(),
                    myLock.getVersion(), schema.getShardSpec().getPartitionNum()));

    // We need to track published segments.
    final List<DataSegment> pushedSegments = new CopyOnWriteArrayList<DataSegment>();
    final DataSegmentPusher wrappedDataSegmentPusher = new DataSegmentPusher() {
        @Override
        public DataSegment push(File file, DataSegment segment) throws IOException {
            final DataSegment pushedSegment = toolbox.getSegmentPusher().push(file, segment);
            pushedSegments.add(pushedSegment);
            return pushedSegment;
        }
    };

    // Create firehose + plumber
    final FireDepartmentMetrics metrics = new FireDepartmentMetrics();
    final Firehose firehose = firehoseFactory.connect();
    final Plumber plumber = new YeOldePlumberSchool(interval, myLock.getVersion(), wrappedDataSegmentPusher,
            tmpDir).findPlumber(schema, metrics);

    // rowFlushBoundary for this job
    final int myRowFlushBoundary = this.rowFlushBoundary > 0 ? rowFlushBoundary
            : toolbox.getConfig().getDefaultRowFlushBoundary();

    try {
        while (firehose.hasMore()) {
            final InputRow inputRow = firehose.nextRow();

            if (shouldIndex(inputRow)) {
                final Sink sink = plumber.getSink(inputRow.getTimestampFromEpoch());
                if (sink == null) {
                    throw new NullPointerException(
                            String.format("Was expecting non-null sink for timestamp[%s]",
                                    new DateTime(inputRow.getTimestampFromEpoch())));
                }

                int numRows = sink.add(inputRow);
                metrics.incrementProcessed();

                if (numRows >= myRowFlushBoundary) {
                    plumber.persist(firehose.commit());
                }
            } else {
                metrics.incrementThrownAway();
            }
        }
    } finally {
        firehose.close();
    }

    plumber.persist(firehose.commit());
    plumber.finishJob();

    // Output metrics
    log.info("Task[%s] took in %,d rows (%,d processed, %,d unparseable, %,d thrown away) and output %,d rows",
            getId(), metrics.processed() + metrics.unparseable() + metrics.thrownAway(), metrics.processed(),
            metrics.unparseable(), metrics.thrownAway(), metrics.rowOutput());

    // Request segment pushes
    toolbox.getTaskActionClient().submit(new SegmentInsertAction(ImmutableSet.copyOf(pushedSegments)));

    // Done
    return TaskStatus.success(getId());
}

From source file: com.metamx.druid.indexing.common.task.KillTask.java

License: Open Source License

@JsonCreator
public KillTask(@JsonProperty("id") String id, @JsonProperty("dataSource") String dataSource,
        @JsonProperty("interval") Interval interval) {
    super(id != null ? id
            : String.format("kill_%s_%s_%s_%s", dataSource, interval.getStart(), interval.getEnd(),
                    new DateTime().toString()),
            dataSource, interval);
}

From source file: com.metamx.druid.indexing.common.task.VersionConverterTask.java

License: Open Source License

private static String makeId(String dataSource, Interval interval) {
    return joinId(TYPE, dataSource, interval.getStart(), interval.getEnd(), new DateTime());
}

From source file: com.metamx.druid.indexing.coordinator.MergerDBCoordinator.java

License: Open Source License

public List<DataSegment> getUnusedSegmentsForInterval(final String dataSource, final Interval interval) {
    List<DataSegment> matchingSegments = dbi.withHandle(new HandleCallback<List<DataSegment>>() {
        @Override
        public List<DataSegment> withHandle(Handle handle) throws IOException {
            return handle.createQuery(String.format(
                    "SELECT payload FROM %s WHERE dataSource = :dataSource and start >= :start and end <= :end and used = 0",
                    dbConnectorConfig.getSegmentTable())).bind("dataSource", dataSource)
                    .bind("start", interval.getStart().toString()).bind("end", interval.getEnd().toString())
                    .fold(Lists.<DataSegment>newArrayList(),
                            new Folder3<List<DataSegment>, Map<String, Object>>() {
                                @Override
                                public List<DataSegment> fold(List<DataSegment> accumulator,
                                        Map<String, Object> stringObjectMap, FoldController foldController,
                                        StatementContext statementContext) throws SQLException {
                                    try {
                                        DataSegment segment = jsonMapper.readValue(
                                                (String) stringObjectMap.get("payload"), DataSegment.class);

                                        accumulator.add(segment);

                                        return accumulator;
                                    } catch (Exception e) {
                                        throw Throwables.propagate(e);
                                    }
                                }
                            });
        }
    });

    log.info("Found %,d segments for %s for interval %s.", matchingSegments.size(), dataSource, interval);
    return matchingSegments;
}
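Note that this query binds interval.getStart().toString() and interval.getEnd().toString() and compares them as strings; this works because ISO-8601 timestamps sort lexicographically in the same order as chronologically, provided all stored values share the same format and time zone offset.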

From source file: com.metamx.druid.indexing.coordinator.TaskLockbox.java

License: Open Source License

/**
 * Return all locks that overlap some search interval.
 */
private List<TaskLockPosse> findLockPossesForInterval(final String dataSource, final Interval interval) {
    giant.lock();

    try {
        final NavigableMap<Interval, TaskLockPosse> dsRunning = running.get(dataSource);
        if (dsRunning == null) {
            // No locks at all
            return Collections.emptyList();
        } else {
            // Tasks are indexed by locked interval, which are sorted by interval start. Intervals are non-overlapping, so:
            final NavigableSet<Interval> dsLockbox = dsRunning.navigableKeySet();
            final Iterable<Interval> searchIntervals = Iterables.concat(
                    // Single interval that starts at or before ours
                    Collections.singletonList(
                            dsLockbox.floor(new Interval(interval.getStart(), new DateTime(Long.MAX_VALUE)))),

                    // All intervals that start somewhere between our start instant (exclusive) and end instant (exclusive)
                    dsLockbox.subSet(new Interval(interval.getStart(), new DateTime(Long.MAX_VALUE)), false,
                            new Interval(interval.getEnd(), interval.getEnd()), false));

            return Lists
                    .newArrayList(FunctionalIterable.create(searchIntervals).filter(new Predicate<Interval>() {
                        @Override
                        public boolean apply(@Nullable Interval searchInterval) {
                            return searchInterval != null && searchInterval.overlaps(interval);
                        }
                    }).transform(new Function<Interval, TaskLockPosse>() {
                        @Override
                        public TaskLockPosse apply(Interval interval) {
                            return dsRunning.get(interval);
                        }
                    }));
        }
    } finally {
        giant.unlock();
    }
}
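The floor/subSet pattern above generalizes: given a set of non-overlapping intervals sorted by start, the only candidates that can overlap a search interval are the single interval starting at or before the search start, plus every interval starting strictly inside the search range. Below is a standalone sketch of that idea (hypothetical names, and assuming a TreeSet ordered by an explicit start-instant comparator, since Joda's Interval is not itself Comparable):

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.TreeSet;

import org.joda.time.DateTime;
import org.joda.time.Interval;

public class IntervalOverlapSearch {
    // Assumes the stored intervals are pairwise non-overlapping, so ordering
    // by start instant alone is a total order.
    static List<Interval> findOverlapping(TreeSet<Interval> stored, Interval search) {
        final List<Interval> result = new ArrayList<Interval>();
        final Interval probe = new Interval(search.getStart(), new DateTime(Long.MAX_VALUE));

        // The single stored interval starting at or before our start; it may
        // still reach past our start and overlap us.
        final Interval floor = stored.floor(probe);
        if (floor != null && floor.overlaps(search)) {
            result.add(floor);
        }

        // All stored intervals starting strictly between our start (exclusive)
        // and our end (exclusive).
        for (Interval candidate : stored.subSet(probe, false,
                new Interval(search.getEnd(), search.getEnd()), false)) {
            if (candidate.overlaps(search)) {
                result.add(candidate);
            }
        }
        return result;
    }

    public static void main(String[] args) {
        final TreeSet<Interval> stored = new TreeSet<Interval>(new Comparator<Interval>() {
            @Override
            public int compare(Interval a, Interval b) {
                return a.getStart().compareTo(b.getStart());
            }
        });
        stored.add(new Interval(new DateTime(2013, 1, 1, 0, 0), new DateTime(2013, 1, 10, 0, 0)));
        stored.add(new Interval(new DateTime(2013, 1, 10, 0, 0), new DateTime(2013, 1, 20, 0, 0)));

        // Both stored intervals overlap [2013-01-05, 2013-01-12).
        System.out.println(findOverlapping(stored,
                new Interval(new DateTime(2013, 1, 5, 0, 0), new DateTime(2013, 1, 12, 0, 0))));
    }
}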

From source file: com.metamx.druid.merger.common.task.DeleteTask.java

License: Open Source License

@JsonCreator
public DeleteTask(@JsonProperty("dataSource") String dataSource, @JsonProperty("interval") Interval interval) {
    super(String.format("delete_%s_%s_%s_%s", dataSource, interval.getStart(), interval.getEnd(),
            new DateTime().toString()), dataSource, interval);
}

From source file: com.metamx.druid.merger.common.task.IndexDeterminePartitionsTask.java

License: Open Source License

@JsonCreator
public IndexDeterminePartitionsTask(@JsonProperty("groupId") String groupId,
        @JsonProperty("interval") Interval interval, @JsonProperty("firehose") FirehoseFactory firehoseFactory,
        @JsonProperty("schema") Schema schema, @JsonProperty("targetPartitionSize") long targetPartitionSize) {
    super(String.format("%s_partitions_%s_%s", groupId, interval.getStart(), interval.getEnd()), groupId,
            schema.getDataSource(), interval);

    this.firehoseFactory = firehoseFactory;
    this.schema = schema;
    this.targetPartitionSize = targetPartitionSize;
}

From source file: com.metamx.druid.merger.common.task.IndexGeneratorTask.java

License: Open Source License

@JsonCreator
public IndexGeneratorTask(@JsonProperty("groupId") String groupId, @JsonProperty("interval") Interval interval,
        @JsonProperty("firehose") FirehoseFactory firehoseFactory, @JsonProperty("schema") Schema schema) {
    super(String.format("%s_generator_%s_%s_%s", groupId, interval.getStart(), interval.getEnd(),
            schema.getShardSpec().getPartitionNum()), groupId, schema.getDataSource(), interval);

    this.firehoseFactory = firehoseFactory;
    this.schema = schema;
}

From source file: com.metamx.druid.merger.coordinator.TaskQueue.java

License: Open Source License

/**
 * Return all locks that overlap some search interval.
 */
private List<TaskGroup> findLocks(final String dataSource, final Interval interval) {
    giant.lock();

    try {
        final NavigableMap<Interval, TaskGroup> dsRunning = running.get(dataSource);
        if (dsRunning == null) {
            // No locks at all
            return Collections.emptyList();
        } else {
            // Tasks are indexed by locked interval, which are sorted by interval start. Intervals are non-overlapping, so:
            final NavigableSet<Interval> dsLockbox = dsRunning.navigableKeySet();
            final Iterable<Interval> searchIntervals = Iterables.concat(
                    // Single interval that starts at or before ours
                    Collections.singletonList(
                            dsLockbox.floor(new Interval(interval.getStart(), new DateTime(Long.MAX_VALUE)))),

                    // All intervals that start somewhere between our start instant (exclusive) and end instant (exclusive)
                    dsLockbox.subSet(new Interval(interval.getStart(), new DateTime(Long.MAX_VALUE)), false,
                            new Interval(interval.getEnd(), interval.getEnd()), false));

            return Lists
                    .newArrayList(FunctionalIterable.create(searchIntervals).filter(new Predicate<Interval>() {
                        @Override
                        public boolean apply(@Nullable Interval searchInterval) {
                            return searchInterval != null && searchInterval.overlaps(interval);
                        }
                    }).transform(new Function<Interval, TaskGroup>() {
                        @Override
                        public TaskGroup apply(Interval interval) {
                            return dsRunning.get(interval);
                        }
                    }));
        }
    } finally {
        giant.unlock();
    }
}