List of usage examples for org.joda.time Interval getStart
public DateTime getStart()
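Before the project examples below, a minimal self-contained sketch of the method itself (the class name and date values are illustrative only):

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Interval;

public class GetStartDemo {
    public static void main(String[] args) {
        DateTime start = new DateTime(2015, 1, 1, 0, 0, DateTimeZone.UTC);
        Interval interval = new Interval(start, start.plusDays(7));
        // getStart() returns the start instant of the interval as a DateTime
        // in the interval's chronology (UTC here).
        System.out.println(interval.getStart());          // 2015-01-01T00:00:00.000Z
        // Conversion to java.util.Date, as several of the examples below do:
        System.out.println(interval.getStart().toDate());
    }
}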
From source file:eu.itesla_project.modules.topo.TopologyHistory.java
License:Mozilla Public License
private static Path getPath(Path dir, Interval histoInterval, double correlationThreshold) {
    return dir.resolve("topology-history-" + Double.toString(correlationThreshold)
            + "-" + histoInterval.getStart() + "-" + histoInterval.getEnd() + ".xml");
}
From source file:eu.itesla_project.offline.server.message.WorkflowStatusMessage.java
License:Mozilla Public License
@Override
public void toJson(JsonGenerator generator) {
    generator.write("workflowId", status.getWorkflowId());
    OfflineWorkflowStep step = status.getStep();
    OfflineWorkflowCreationParameters creationParameters = status.getCreationParameters();
    OfflineWorkflowStartParameters startParameters = status.getStartParameters();
    if (creationParameters != null) {
        if (creationParameters.getBaseCaseDate() != null) {
            DateFormat df = DateFormat.getDateTimeInstance(DateFormat.SHORT, DateFormat.SHORT);
            generator.write("baseCaseDate", df.format(creationParameters.getBaseCaseDate().toDate()));
        }
        if (creationParameters.getHistoInterval() != null) {
            DateFormat df = SimpleDateFormat.getDateInstance(DateFormat.SHORT);
            Interval interval = creationParameters.getHistoInterval();
            if (interval.getStart() != null) {
                generator.write("intervalStart", df.format(interval.getStart().toDate()));
            }
            if (interval.getEnd() != null) {
                generator.write("intervalStop", df.format(interval.getEnd().toDate()));
            }
        }
        if (creationParameters.getCountries() != null) {
            generator.writeStartArray("countries");
            for (Country country : creationParameters.getCountries()) {
                generator.write(country.getName());
            }
            generator.writeEnd();
        }
    }
    if (step != null) {
        generator.write("step", step.name());
        generator.write("running", step.isRunning());
    } else {
        generator.write("step", "null");
        generator.write("running", false);
    }
    if (startParameters != null) {
        generator.write("duration", startParameters.getDuration());
        generator.write("startTime", status.getStartTime().toString());
    }
}
From source file:fr.cls.atoll.motu.library.misc.intfce.Organizer.java
License:Open Source License
/**
 * Inits the time coverage.
 *
 * @param datePeriod the date period
 *
 * @return the time coverage
 *
 * @throws MotuException the motu exception
 */
public static TimeCoverage initTimeCoverage(Interval datePeriod) throws MotuException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("initTimeCoverage(DatePeriod) - entering");
    }
    TimeCoverage timeCoverage = Organizer.createTimeCoverage();
    if (datePeriod == null) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("initTimeCoverage(DatePeriod, TimeCoverage) - datePeriod is null - exiting");
        }
        return timeCoverage;
    }
    Date start = datePeriod.getStart().toDate();
    Date end = datePeriod.getEnd().toDate();
    timeCoverage.setStart(Organizer.dateToXMLGregorianCalendar(start));
    timeCoverage.setEnd(Organizer.dateToXMLGregorianCalendar(end));
    timeCoverage.setCode(ErrorType.OK);
    timeCoverage.setMsg(ErrorType.OK.toString());
    return timeCoverage;
}
From source file:google.registry.monitoring.metrics.stackdriver.StackdriverWriter.java
License:Open Source License
private static TimeInterval encodeTimeInterval(Interval nativeInterval, Kind metricKind) {
    TimeInterval encodedInterval = new TimeInterval()
            .setStartTime(DATETIME_FORMATTER.print(nativeInterval.getStart()));
    DateTime endTimestamp = nativeInterval.toDurationMillis() == 0 && metricKind != Kind.GAUGE
            ? nativeInterval.getEnd().plusMillis(1)
            : nativeInterval.getEnd();
    return encodedInterval.setEndTime(DATETIME_FORMATTER.print(endTimestamp));
}
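A short sketch of the zero-duration case this method guards against (values are illustrative; only Joda-Time is assumed). A non-GAUGE point whose interval has equal start and end gets its end nudged forward by one millisecond:

DateTime instant = new DateTime(2017, 1, 1, 0, 0, DateTimeZone.UTC);
Interval point = new Interval(instant, instant);
System.out.println(point.toDurationMillis());        // 0
System.out.println(point.getStart());                // 2017-01-01T00:00:00.000Z
System.out.println(point.getEnd().plusMillis(1));    // 2017-01-01T00:00:00.001Z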
From source file:google.registry.tools.params.IntervalParameter.java
License:Open Source License
@Override
public Interval convert(String value) {
    // Interval.parse(null) creates an interval with both start and end times set to now.
    // Do something a little more reasonable.
    if (value == null) {
        throw new NullPointerException();
    }
    Interval interval = Interval.parse(value);
    // Interval does not have a way to set the time zone, so create a new interval with the
    // start and end times of the parsed interval converted to UTC.
    return new Interval(interval.getStart().withZone(DateTimeZone.UTC),
            interval.getEnd().withZone(DateTimeZone.UTC));
}
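A hypothetical usage of the converter above (it assumes IntervalParameter has a default constructor, which is not shown in this snippet): whatever offset appears in the input string, both endpoints of the returned interval are reported in UTC.

Interval utc = new IntervalParameter().convert("2016-06-09T12:00:00+02:00/2016-06-10T12:00:00+02:00");
System.out.println(utc.getStart().getZone());   // UTC
System.out.println(utc.getEnd().getZone());     // UTC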
From source file:io.coala.dsol.util.TreatmentBuilder.java
License:Apache License
public TreatmentBuilder withRunInterval(final Interval interval) {
    return withStartTime(interval.getStart()).withRunLength(interval.toDuration());
}
From source file:io.druid.indexer.DetermineHashedPartitionsJob.java
License:Apache License
public boolean run() {
    try {
        /*
         * Group by (timestamp, dimensions) so we can correctly count dimension values as they would appear
         * in the final segment.
         */
        long startTime = System.currentTimeMillis();
        final Job groupByJob = Job.getInstance(new Configuration(), String
                .format("%s-determine_partitions_hashed-%s", config.getDataSource(), config.getIntervals()));

        JobHelper.injectSystemProperties(groupByJob);
        config.addJobProperties(groupByJob);
        groupByJob.setMapperClass(DetermineCardinalityMapper.class);
        groupByJob.setMapOutputKeyClass(LongWritable.class);
        groupByJob.setMapOutputValueClass(BytesWritable.class);
        groupByJob.setReducerClass(DetermineCardinalityReducer.class);
        groupByJob.setOutputKeyClass(NullWritable.class);
        groupByJob.setOutputValueClass(NullWritable.class);
        groupByJob.setOutputFormatClass(SequenceFileOutputFormat.class);
        groupByJob.setPartitionerClass(DetermineHashedPartitionsPartitioner.class);
        if (!config.getSegmentGranularIntervals().isPresent()) {
            groupByJob.setNumReduceTasks(1);
        } else {
            groupByJob.setNumReduceTasks(config.getSegmentGranularIntervals().get().size());
        }
        JobHelper.setupClasspath(JobHelper.distributedClassPath(config.getWorkingPath()),
                JobHelper.distributedClassPath(config.makeIntermediatePath()), groupByJob);
        config.addInputPaths(groupByJob);
        config.intoConfiguration(groupByJob);
        FileOutputFormat.setOutputPath(groupByJob, config.makeGroupedDataDir());

        groupByJob.submit();
        log.info("Job %s submitted, status available at: %s", groupByJob.getJobName(),
                groupByJob.getTrackingURL());

        if (!groupByJob.waitForCompletion(true)) {
            log.error("Job failed: %s", groupByJob.getJobID());
            return false;
        }

        /*
         * Load partitions and intervals determined by the previous job.
         */
        log.info("Job completed, loading up partitions for intervals[%s].",
                config.getSegmentGranularIntervals());
        FileSystem fileSystem = null;
        if (!config.getSegmentGranularIntervals().isPresent()) {
            final Path intervalInfoPath = config.makeIntervalInfoPath();
            fileSystem = intervalInfoPath.getFileSystem(groupByJob.getConfiguration());
            if (!Utils.exists(groupByJob, fileSystem, intervalInfoPath)) {
                throw new ISE("Path[%s] didn't exist!?", intervalInfoPath);
            }
            List<Interval> intervals = config.jsonMapper.readValue(
                    Utils.openInputStream(groupByJob, intervalInfoPath), new TypeReference<List<Interval>>() {
                    });
            config.setGranularitySpec(
                    new UniformGranularitySpec(config.getGranularitySpec().getSegmentGranularity(),
                            config.getGranularitySpec().getQueryGranularity(), intervals));
            log.info("Determined Intervals for Job [%s]" + config.getSegmentGranularIntervals());
        }
        Map<DateTime, List<HadoopyShardSpec>> shardSpecs = Maps.newTreeMap(DateTimeComparator.getInstance());
        int shardCount = 0;
        for (Interval segmentGranularity : config.getSegmentGranularIntervals().get()) {
            DateTime bucket = segmentGranularity.getStart();

            final Path partitionInfoPath = config.makeSegmentPartitionInfoPath(segmentGranularity);
            if (fileSystem == null) {
                fileSystem = partitionInfoPath.getFileSystem(groupByJob.getConfiguration());
            }
            if (Utils.exists(groupByJob, fileSystem, partitionInfoPath)) {
                final Long numRows = config.jsonMapper.readValue(
                        Utils.openInputStream(groupByJob, partitionInfoPath), new TypeReference<Long>() {
                        });

                log.info("Found approximately [%,d] rows in data.", numRows);

                final int numberOfShards = (int) Math.ceil((double) numRows / config.getTargetPartitionSize());

                log.info("Creating [%,d] shards", numberOfShards);

                List<HadoopyShardSpec> actualSpecs = Lists.newArrayListWithExpectedSize(numberOfShards);
                if (numberOfShards == 1) {
                    actualSpecs.add(new HadoopyShardSpec(new NoneShardSpec(), shardCount++));
                } else {
                    for (int i = 0; i < numberOfShards; ++i) {
                        actualSpecs.add(new HadoopyShardSpec(new HashBasedNumberedShardSpec(i, numberOfShards,
                                HadoopDruidIndexerConfig.jsonMapper), shardCount++));
                        log.info("DateTime[%s], partition[%d], spec[%s]", bucket, i, actualSpecs.get(i));
                    }
                }
                shardSpecs.put(bucket, actualSpecs);
            } else {
                log.info("Path[%s] didn't exist!?", partitionInfoPath);
            }
        }
        config.setShardSpecs(shardSpecs);
        log.info("DetermineHashedPartitionsJob took %d millis", (System.currentTimeMillis() - startTime));

        return true;
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
}
From source file:io.druid.indexer.DeterminePartitionsJob.java
License:Apache License
public boolean run() {
    try {
        /*
         * Group by (timestamp, dimensions) so we can correctly count dimension values as they would appear
         * in the final segment.
         */
        if (!(config.getPartitionsSpec() instanceof SingleDimensionPartitionsSpec)) {
            throw new ISE(
                    "DeterminePartitionsJob can only be run for SingleDimensionPartitionsSpec, partitionSpec found [%s]",
                    config.getPartitionsSpec());
        }

        if (!config.getPartitionsSpec().isAssumeGrouped()) {
            final Job groupByJob = Job.getInstance(new Configuration(), String.format(
                    "%s-determine_partitions_groupby-%s", config.getDataSource(), config.getIntervals()));

            JobHelper.injectSystemProperties(groupByJob);
            config.addJobProperties(groupByJob);

            groupByJob.setMapperClass(DeterminePartitionsGroupByMapper.class);
            groupByJob.setMapOutputKeyClass(BytesWritable.class);
            groupByJob.setMapOutputValueClass(NullWritable.class);
            groupByJob.setCombinerClass(DeterminePartitionsGroupByReducer.class);
            groupByJob.setReducerClass(DeterminePartitionsGroupByReducer.class);
            groupByJob.setOutputKeyClass(BytesWritable.class);
            groupByJob.setOutputValueClass(NullWritable.class);
            groupByJob.setOutputFormatClass(SequenceFileOutputFormat.class);
            JobHelper.setupClasspath(JobHelper.distributedClassPath(config.getWorkingPath()),
                    JobHelper.distributedClassPath(config.makeIntermediatePath()), groupByJob);

            config.addInputPaths(groupByJob);
            config.intoConfiguration(groupByJob);
            FileOutputFormat.setOutputPath(groupByJob, config.makeGroupedDataDir());

            groupByJob.submit();
            log.info("Job %s submitted, status available at: %s", groupByJob.getJobName(),
                    groupByJob.getTrackingURL());

            if (!groupByJob.waitForCompletion(true)) {
                log.error("Job failed: %s", groupByJob.getJobID());
                return false;
            }
        } else {
            log.info("Skipping group-by job.");
        }

        /*
         * Read grouped data and determine appropriate partitions.
         */
        final Job dimSelectionJob = Job.getInstance(new Configuration(), String.format(
                "%s-determine_partitions_dimselection-%s", config.getDataSource(), config.getIntervals()));

        dimSelectionJob.getConfiguration().set("io.sort.record.percent", "0.19");

        JobHelper.injectSystemProperties(dimSelectionJob);
        config.addJobProperties(dimSelectionJob);

        if (!config.getPartitionsSpec().isAssumeGrouped()) {
            // Read grouped data from the groupByJob.
            dimSelectionJob.setMapperClass(DeterminePartitionsDimSelectionPostGroupByMapper.class);
            dimSelectionJob.setInputFormatClass(SequenceFileInputFormat.class);
            FileInputFormat.addInputPath(dimSelectionJob, config.makeGroupedDataDir());
        } else {
            // Directly read the source data, since we assume it's already grouped.
            dimSelectionJob.setMapperClass(DeterminePartitionsDimSelectionAssumeGroupedMapper.class);
            config.addInputPaths(dimSelectionJob);
        }

        SortableBytes.useSortableBytesAsMapOutputKey(dimSelectionJob);
        dimSelectionJob.setMapOutputValueClass(Text.class);
        dimSelectionJob.setCombinerClass(DeterminePartitionsDimSelectionCombiner.class);
        dimSelectionJob.setReducerClass(DeterminePartitionsDimSelectionReducer.class);
        dimSelectionJob.setOutputKeyClass(BytesWritable.class);
        dimSelectionJob.setOutputValueClass(Text.class);
        dimSelectionJob.setOutputFormatClass(DeterminePartitionsDimSelectionOutputFormat.class);
        dimSelectionJob.setPartitionerClass(DeterminePartitionsDimSelectionPartitioner.class);
        dimSelectionJob.setNumReduceTasks(config.getGranularitySpec().bucketIntervals().get().size());
        JobHelper.setupClasspath(JobHelper.distributedClassPath(config.getWorkingPath()),
                JobHelper.distributedClassPath(config.makeIntermediatePath()), dimSelectionJob);

        config.intoConfiguration(dimSelectionJob);
        FileOutputFormat.setOutputPath(dimSelectionJob, config.makeIntermediatePath());

        dimSelectionJob.submit();
        log.info("Job %s submitted, status available at: %s", dimSelectionJob.getJobName(),
                dimSelectionJob.getTrackingURL());

        if (!dimSelectionJob.waitForCompletion(true)) {
            log.error("Job failed: %s", dimSelectionJob.getJobID().toString());
            return false;
        }

        /*
         * Load partitions determined by the previous job.
         */
        log.info("Job completed, loading up partitions for intervals[%s].",
                config.getSegmentGranularIntervals());
        FileSystem fileSystem = null;
        Map<DateTime, List<HadoopyShardSpec>> shardSpecs = Maps.newTreeMap(DateTimeComparator.getInstance());
        int shardCount = 0;
        for (Interval segmentGranularity : config.getSegmentGranularIntervals().get()) {
            final Path partitionInfoPath = config.makeSegmentPartitionInfoPath(segmentGranularity);
            if (fileSystem == null) {
                fileSystem = partitionInfoPath.getFileSystem(dimSelectionJob.getConfiguration());
            }
            if (Utils.exists(dimSelectionJob, fileSystem, partitionInfoPath)) {
                List<ShardSpec> specs = config.jsonMapper.readValue(
                        Utils.openInputStream(dimSelectionJob, partitionInfoPath),
                        new TypeReference<List<ShardSpec>>() {
                        });

                List<HadoopyShardSpec> actualSpecs = Lists.newArrayListWithExpectedSize(specs.size());
                for (int i = 0; i < specs.size(); ++i) {
                    actualSpecs.add(new HadoopyShardSpec(specs.get(i), shardCount++));
                    log.info("DateTime[%s], partition[%d], spec[%s]", segmentGranularity, i, actualSpecs.get(i));
                }

                shardSpecs.put(segmentGranularity.getStart(), actualSpecs);
            } else {
                log.info("Path[%s] didn't exist!?", partitionInfoPath);
            }
        }
        config.setShardSpecs(shardSpecs);

        return true;
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
}
From source file:io.druid.indexer.HadoopDruidDetermineConfigurationJob.java
License:Apache License
@Override
public boolean run() {
    List<Jobby> jobs = Lists.newArrayList();
    JobHelper.ensurePaths(config);

    if (config.isDeterminingPartitions()) {
        jobs.add(config.getPartitionsSpec().getPartitionJob(config));
    } else {
        int shardsPerInterval = config.getPartitionsSpec().getNumShards();
        Map<DateTime, List<HadoopyShardSpec>> shardSpecs = Maps.newTreeMap(DateTimeComparator.getInstance());
        int shardCount = 0;
        for (Interval segmentGranularity : config.getSegmentGranularIntervals().get()) {
            DateTime bucket = segmentGranularity.getStart();
            if (shardsPerInterval > 0) {
                List<HadoopyShardSpec> specs = Lists.newArrayListWithCapacity(shardsPerInterval);
                for (int i = 0; i < shardsPerInterval; i++) {
                    specs.add(new HadoopyShardSpec(new HashBasedNumberedShardSpec(i, shardsPerInterval,
                            HadoopDruidIndexerConfig.jsonMapper), shardCount++));
                }
                shardSpecs.put(bucket, specs);
                log.info("DateTime[%s], spec[%s]", bucket, specs);
            } else {
                final HadoopyShardSpec spec = new HadoopyShardSpec(new NoneShardSpec(), shardCount++);
                shardSpecs.put(bucket, Lists.newArrayList(spec));
                log.info("DateTime[%s], spec[%s]", bucket, spec);
            }
        }
        config.setShardSpecs(shardSpecs);
    }
    return JobHelper.runJobs(jobs, config);
}
From source file:io.druid.indexer.HadoopDruidIndexerConfig.java
License:Apache License
public Optional<Iterable<Bucket>> getAllBuckets() {
    Optional<Set<Interval>> intervals = getSegmentGranularIntervals();
    if (intervals.isPresent()) {
        return Optional.of((Iterable<Bucket>) FunctionalIterable.create(intervals.get())
                .transformCat(new Function<Interval, Iterable<Bucket>>() {
                    @Override
                    public Iterable<Bucket> apply(Interval input) {
                        final DateTime bucketTime = input.getStart();
                        final List<HadoopyShardSpec> specs = schema.getTuningConfig().getShardSpecs()
                                .get(bucketTime);
                        if (specs == null) {
                            return ImmutableList.of();
                        }
                        return FunctionalIterable.create(specs)
                                .transform(new Function<HadoopyShardSpec, Bucket>() {
                                    int i = 0;

                                    @Override
                                    public Bucket apply(HadoopyShardSpec input) {
                                        return new Bucket(input.getShardNum(), bucketTime, i++);
                                    }
                                });
                    }
                }));
    } else {
        return Optional.absent();
    }
}