Example usage for org.joda.time Duration ZERO


Introduction

This page collects usage examples for the org.joda.time Duration.ZERO constant.

Prototype

public static final Duration ZERO


Document

Constant representing a zero-millisecond duration.
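
A minimal, self-contained sketch of how Duration.ZERO behaves (plain Joda-Time; the class name is ours):

import org.joda.time.Duration;

public class DurationZeroBasics {
    public static void main(String[] args) {
        Duration zero = Duration.ZERO;
        System.out.println(zero.getMillis());                               // 0
        System.out.println(Duration.millis(0).equals(zero));                // true
        System.out.println(Duration.standardSeconds(5).isLongerThan(zero)); // true
        System.out.println(Duration.millis(-1).isShorterThan(zero));        // true: durations may be negative
        System.out.println(zero.plus(Duration.millis(250)));                // PT0.250S
    }
}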

Usage

From source file:org.apache.beam.runners.core.ReduceFnRunner.java

License:Apache License

public void onTimers(Iterable<TimerData> timers) throws Exception {
    if (!timers.iterator().hasNext()) {
        return;
    }

    // Create a reusable context for each window and begin prefetching necessary
    // state.
    Map<BoundedWindow, WindowActivation> windowActivations = new HashMap<>();

    for (TimerData timer : timers) {
        checkArgument(timer.getNamespace() instanceof WindowNamespace,
                "Expected timer to be in WindowNamespace, but was in %s", timer.getNamespace());
        @SuppressWarnings("unchecked")
        WindowNamespace<W> windowNamespace = (WindowNamespace<W>) timer.getNamespace();
        W window = windowNamespace.getWindow();

        WindowTracing.debug(
                "{}: Received timer key:{}; window:{}; data:{} with " + "inputWatermark:{}; outputWatermark:{}",
                ReduceFnRunner.class.getSimpleName(), key, window, timer,
                timerInternals.currentInputWatermarkTime(), timerInternals.currentOutputWatermarkTime());

        // Processing time timers for an expired window are ignored, just like elements
        // that show up too late. Window GC is managed by an event time timer.
        if (TimeDomain.EVENT_TIME != timer.getDomain() && windowIsExpired(window)) {
            continue;
        }

        // How a window is processed is a function only of the current state, not the details
        // of the timer. This makes us robust to large leaps in processing time and watermark
        // time, where both EOW and GC timers come in together and we need to GC and emit
        // the final pane.
        if (windowActivations.containsKey(window)) {
            continue;
        }

        ReduceFn<K, InputT, OutputT, W>.Context directContext = contextFactory.base(window, StateStyle.DIRECT);
        ReduceFn<K, InputT, OutputT, W>.Context renamedContext = contextFactory.base(window,
                StateStyle.RENAMED);
        WindowActivation windowActivation = new WindowActivation(directContext, renamedContext);
        windowActivations.put(window, windowActivation);

        // Perform prefetching of state to determine if the trigger should fire.
        if (windowActivation.isGarbageCollection) {
            triggerRunner.prefetchIsClosed(directContext.state());
        } else {
            triggerRunner.prefetchShouldFire(directContext.window(), directContext.state());
        }
    }

    // For those windows that are active and open, prefetch the triggering or emitting state.
    for (WindowActivation timer : windowActivations.values()) {
        if (timer.windowIsActiveAndOpen()) {
            ReduceFn<K, InputT, OutputT, W>.Context directContext = timer.directContext;
            if (timer.isGarbageCollection) {
                prefetchOnTrigger(directContext, timer.renamedContext);
            } else if (triggerRunner.shouldFire(directContext.window(), directContext.timers(),
                    directContext.state())) {
                prefetchEmit(directContext, timer.renamedContext);
            }
        }
    }

    // Perform processing now that everything is prefetched.
    for (WindowActivation windowActivation : windowActivations.values()) {
        ReduceFn<K, InputT, OutputT, W>.Context directContext = windowActivation.directContext;
        ReduceFn<K, InputT, OutputT, W>.Context renamedContext = windowActivation.renamedContext;

        if (windowActivation.isGarbageCollection) {
            WindowTracing.debug(
                    "{}: Cleaning up for key:{}; window:{} with inputWatermark:{}; outputWatermark:{}",
                    ReduceFnRunner.class.getSimpleName(), key, directContext.window(),
                    timerInternals.currentInputWatermarkTime(), timerInternals.currentOutputWatermarkTime());

            boolean windowIsActiveAndOpen = windowActivation.windowIsActiveAndOpen();
            if (windowIsActiveAndOpen) {
                // We need to call onTrigger to emit the final pane if required.
                // The final pane *may* be ON_TIME if no prior ON_TIME pane has been emitted,
                // and the watermark has passed the end of the window.
                @Nullable
                Instant newHold = onTrigger(directContext, renamedContext, true /* isFinished */,
                        windowActivation.isEndOfWindow);
                checkState(newHold == null, "Hold placed at %s despite isFinished being true.", newHold);
            }

            // Cleanup flavor B: Clear all the remaining state for this window since we'll never
            // see elements for it again.
            clearAllState(directContext, renamedContext, windowIsActiveAndOpen);
        } else {
            WindowTracing.debug(
                    "{}.onTimers: Triggering for key:{}; window:{} with "
                            + "inputWatermark:{}; outputWatermark:{}",
                    ReduceFnRunner.class.getSimpleName(), key, directContext.window(),
                    timerInternals.currentInputWatermarkTime(), timerInternals.currentOutputWatermarkTime());
            if (windowActivation.windowIsActiveAndOpen() && triggerRunner.shouldFire(directContext.window(),
                    directContext.timers(), directContext.state())) {
                emit(directContext, renamedContext);
            }

            if (windowActivation.isEndOfWindow) {
                // If the window strategy trigger includes a watermark trigger then at this point
                // there should be no data holds, either because we'd already cleared them on an
                // earlier onTrigger, or because we just cleared them on the above emit.
                // We could assert this but it is very expensive.

                // Since we are processing an on-time firing we should schedule the garbage collection
                // timer. (If getAllowedLateness is zero then the timer event will be considered a
                // cleanup event and handled by the above).
                // Note we must do this even if the trigger is finished so that we are sure to cleanup
                // any final trigger finished bits.
                checkState(windowingStrategy.getAllowedLateness().isLongerThan(Duration.ZERO),
                        "Unexpected zero getAllowedLateness");
                Instant cleanupTime = LateDataUtils.garbageCollectionTime(directContext.window(),
                        windowingStrategy);
                WindowTracing.debug(
                        "ReduceFnRunner.onTimer: Scheduling cleanup timer for key:{}; window:{} at {} with "
                                + "inputWatermark:{}; outputWatermark:{}",
                        key, directContext.window(), cleanupTime, timerInternals.currentInputWatermarkTime(),
                        timerInternals.currentOutputWatermarkTime());
                checkState(!cleanupTime.isAfter(BoundedWindow.TIMESTAMP_MAX_VALUE),
                        "Cleanup time %s is beyond end-of-time", cleanupTime);
                directContext.timers().setTimer(cleanupTime, TimeDomain.EVENT_TIME);
            }
        }
    }
}

From source file:org.apache.beam.runners.dataflow.DataflowPipelineJob.java

License:Apache License

private static BackOff getMessagesBackoff(Duration duration) {
    FluentBackoff factory = MESSAGES_BACKOFF_FACTORY;

    if (!duration.isShorterThan(Duration.ZERO)) {
        factory = factory.withMaxCumulativeBackoff(duration);
    }

    return BackOffAdapter.toGcpBackOff(factory.backoff());
}

From source file:org.apache.beam.runners.dataflow.DataflowPipelineJob.java

License:Apache License

/**
 * Reset backoff. If duration is limited, calculate time remaining, otherwise just reset retry
 * count.
 *
 * <p>If a total duration for all backoff has been set, update the new cumulative sleep time to be
 * the remaining total backoff duration, stopping if we have already exceeded the allotted time.
 */
private static BackOff resetBackoff(Duration duration, NanoClock nanoClock, long startNanos) {
    BackOff backoff;
    if (duration.isLongerThan(Duration.ZERO)) {
        long nanosConsumed = nanoClock.nanoTime() - startNanos;
        Duration consumed = Duration.millis((nanosConsumed + 999999) / 1000000);
        Duration remaining = duration.minus(consumed);
        if (remaining.isLongerThan(Duration.ZERO)) {
            backoff = getMessagesBackoff(remaining);
        } else {
            backoff = BackOff.STOP_BACKOFF;
        }
    } else {
        backoff = getMessagesBackoff(duration);
    }
    return backoff;
}
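
The nanosecond-to-millisecond ceiling above is the subtle part; here is a standalone sketch of the same remaining-budget arithmetic (helper and class names are ours, plain Joda-Time):

import org.joda.time.Duration;

public class RemainingBackoffSketch {
    // Round consumed nanoseconds up to whole milliseconds, then subtract from
    // the budget, mirroring the arithmetic in resetBackoff above.
    static Duration remaining(Duration budget, long nanosConsumed) {
        Duration consumed = Duration.millis((nanosConsumed + 999999) / 1000000);
        return budget.minus(consumed);
    }

    public static void main(String[] args) {
        Duration budget = Duration.standardSeconds(2);
        System.out.println(remaining(budget, 1500000001L)); // PT0.499S: 1.500000001s rounds up to 1501ms
        System.out.println(remaining(budget, 2000000000L)
                .isLongerThan(Duration.ZERO));              // false, so the caller stops backing off
    }
}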

From source file:org.apache.beam.runners.direct.ExecutorServiceParallelExecutor.java

License:Apache License

@Override
public State waitUntilFinish(Duration duration) throws Exception {
    Instant completionTime;
    if (duration.equals(Duration.ZERO)) {
        completionTime = new Instant(Long.MAX_VALUE);
    } else {
        completionTime = Instant.now().plus(duration);
    }

    VisibleExecutorUpdate update = null;
    while (Instant.now().isBefore(completionTime) && (update == null || isTerminalStateUpdate(update))) {
        // Get an update; don't block forever if another thread has handled it. The call to poll will
        // wait the entire timeout; this call primarily exists to relinquish any core.
        update = visibleUpdates.tryNext(Duration.millis(25L));
        if (update == null && pipelineState.get().isTerminal()) {
            // there are no updates to process and no updates will ever be published because the
            // executor is shutdown
            return pipelineState.get();
        } else if (update != null && update.thrown.isPresent()) {
            Throwable thrown = update.thrown.get();
            if (thrown instanceof Exception) {
                throw (Exception) thrown;
            } else if (thrown instanceof Error) {
                throw (Error) thrown;
            } else {
                throw new Exception("Unknown Type of Throwable", thrown);
            }
        }
    }
    return pipelineState.get();
}
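
Note the sentinel convention here: Duration.ZERO means "wait indefinitely" rather than "return immediately". A minimal sketch of that pattern in isolation (our own helper, not part of the runner):

import org.joda.time.Duration;
import org.joda.time.Instant;

public class WaitDeadlineSketch {
    // Duration.ZERO is treated as "no deadline", as in waitUntilFinish above.
    static Instant deadlineFor(Duration timeout) {
        return timeout.equals(Duration.ZERO)
                ? new Instant(Long.MAX_VALUE) // effectively never reached
                : Instant.now().plus(timeout);
    }

    public static void main(String[] args) {
        System.out.println(deadlineFor(Duration.ZERO).getMillis() == Long.MAX_VALUE); // true
        System.out.println(deadlineFor(Duration.standardSeconds(5)));                 // now + 5s
    }
}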

From source file:org.apache.beam.runners.direct.portable.ReferenceRunner.java

License:Apache License

public void execute() throws Exception {
    ExecutableGraph<PTransformNode, PCollectionNode> graph = PortableGraph.forPipeline(pipeline);
    BundleFactory bundleFactory = ImmutableListBundleFactory.create();
    EvaluationContext ctxt = EvaluationContext.create(Instant::new, bundleFactory, graph,
            getKeyedPCollections(graph));
    RootProviderRegistry rootRegistry = RootProviderRegistry.javaPortableRegistry(bundleFactory);
    int targetParallelism = Math.max(Runtime.getRuntime().availableProcessors(), 3);
    ServerFactory serverFactory = createServerFactory();
    ControlClientPool controlClientPool = MapControlClientPool.create();
    ExecutorService dataExecutor = Executors.newCachedThreadPool();
    ProvisionInfo provisionInfo = ProvisionInfo.newBuilder().setJobId("id").setJobName("reference")
            .setPipelineOptions(options).setWorkerId("foo").setResourceLimits(Resources.getDefaultInstance())
            .build();
    try (GrpcFnServer<GrpcLoggingService> logging = GrpcFnServer
            .allocatePortAndCreateFor(GrpcLoggingService.forWriter(Slf4jLogWriter.getDefault()), serverFactory);
            GrpcFnServer<ArtifactRetrievalService> artifact = artifactsDir == null
                    ? GrpcFnServer.allocatePortAndCreateFor(UnsupportedArtifactRetrievalService.create(),
                            serverFactory)
                    : GrpcFnServer.allocatePortAndCreateFor(
                            LocalFileSystemArtifactRetrievalService.forRootDirectory(artifactsDir),
                            serverFactory);
            GrpcFnServer<StaticGrpcProvisionService> provisioning = GrpcFnServer
                    .allocatePortAndCreateFor(StaticGrpcProvisionService.create(provisionInfo), serverFactory);
            GrpcFnServer<FnApiControlClientPoolService> control = GrpcFnServer.allocatePortAndCreateFor(
                    FnApiControlClientPoolService.offeringClientsToPool(controlClientPool.getSink(),
                            GrpcContextHeaderAccessorProvider.getHeaderAccessor()),
                    serverFactory);
            GrpcFnServer<GrpcDataService> data = GrpcFnServer.allocatePortAndCreateFor(
                    GrpcDataService.create(dataExecutor, OutboundObserverFactory.serverDirect()),
                    serverFactory);
            GrpcFnServer<GrpcStateService> state = GrpcFnServer
                    .allocatePortAndCreateFor(GrpcStateService.create(), serverFactory)) {

        EnvironmentFactory environmentFactory = createEnvironmentFactory(control, logging, artifact,
                provisioning, controlClientPool);
        JobBundleFactory jobBundleFactory = SingleEnvironmentInstanceJobBundleFactory.create(environmentFactory,
                data, state);

        TransformEvaluatorRegistry transformRegistry = TransformEvaluatorRegistry.portableRegistry(graph,
                pipeline.getComponents(), bundleFactory, jobBundleFactory,
                EvaluationContextStepStateAndTimersProvider.forContext(ctxt));
        ExecutorServiceParallelExecutor executor = ExecutorServiceParallelExecutor.create(targetParallelism,
                rootRegistry, transformRegistry, graph, ctxt);
        executor.start();
        executor.waitUntilFinish(Duration.ZERO);
    } finally {
        dataExecutor.shutdown();
    }
}

From source file:org.apache.beam.runners.flink.examples.streaming.AutoComplete.java

License:Apache License

public static void main(String[] args) throws IOException {
    Options options = PipelineOptionsFactory.fromArgs(args).withValidation().as(Options.class);
    options.setStreaming(true);
    options.setCheckpointingInterval(1000L);
    options.setNumberOfExecutionRetries(5);
    options.setExecutionRetryDelay(3000L);
    options.setRunner(FlinkRunner.class);

    WindowFn<Object, ?> windowFn = FixedWindows.of(Duration.standardSeconds(options.getWindowSize()));

    // Create the pipeline.
    Pipeline p = Pipeline.create(options);
    PCollection<KV<String, List<CompletionCandidate>>> toWrite = p
            .apply("WordStream", Read.from(new UnboundedSocketSource<>("localhost", 9999, '\n', 3)))
            .apply(ParDo.of(new ExtractWordsFn()))
            .apply(Window.<String>into(windowFn).triggering(AfterWatermark.pastEndOfWindow())
                    .withAllowedLateness(Duration.ZERO).discardingFiredPanes())
            .apply(ComputeTopCompletions.top(10, options.getRecursive()));

    toWrite.apply("FormatForPerTaskFile", ParDo.of(new FormatForPerTaskLocalFile()))
            .apply(TextIO.Write.to("./outputAutoComplete.txt"));

    p.run();
}

From source file:org.apache.beam.runners.flink.examples.streaming.JoinExamples.java

License:Apache License

public static void main(String[] args) throws Exception {
    Options options = PipelineOptionsFactory.fromArgs(args).withValidation().as(Options.class);
    options.setStreaming(true);
    options.setCheckpointingInterval(1000L);
    options.setNumberOfExecutionRetries(5);
    options.setExecutionRetryDelay(3000L);
    options.setRunner(FlinkRunner.class);

    WindowFn<Object, ?> windowFn = FixedWindows.of(Duration.standardSeconds(options.getWindowSize()));

    Pipeline p = Pipeline.create(options);

    // The following two 'apply' calls create the two inputs to our pipeline,
    // one for each of our input sources.
    PCollection<String> streamA = p
            .apply("FirstStream", Read.from(new UnboundedSocketSource<>("localhost", 9999, '\n', 3)))
            .apply(Window.<String>into(windowFn).triggering(AfterWatermark.pastEndOfWindow())
                    .withAllowedLateness(Duration.ZERO).discardingFiredPanes());
    PCollection<String> streamB = p
            .apply("SecondStream", Read.from(new UnboundedSocketSource<>("localhost", 9998, '\n', 3)))
            .apply(Window.<String>into(windowFn).triggering(AfterWatermark.pastEndOfWindow())
                    .withAllowedLateness(Duration.ZERO).discardingFiredPanes());

    PCollection<String> formattedResults = joinEvents(streamA, streamB);
    formattedResults.apply(TextIO.Write.to("./outputJoin.txt"));
    p.run();
}

From source file:org.apache.beam.runners.flink.examples.streaming.KafkaWindowedWordCountExample.java

License:Apache License

public static void main(String[] args) {
    PipelineOptionsFactory.register(KafkaStreamingWordCountOptions.class);
    KafkaStreamingWordCountOptions options = PipelineOptionsFactory.fromArgs(args)
            .as(KafkaStreamingWordCountOptions.class);
    options.setJobName("KafkaExample - WindowSize: " + options.getWindowSize() + " seconds");
    options.setStreaming(true);
    options.setCheckpointingInterval(1000L);
    options.setNumberOfExecutionRetries(5);
    options.setExecutionRetryDelay(3000L);
    options.setRunner(FlinkRunner.class);

    System.out.println(options.getKafkaTopic() + " " + options.getZookeeper() + " " + options.getBroker() + " "
            + options.getGroup());
    Pipeline pipeline = Pipeline.create(options);

    Properties p = new Properties();
    p.setProperty("zookeeper.connect", options.getZookeeper());
    p.setProperty("bootstrap.servers", options.getBroker());
    p.setProperty("group.id", options.getGroup());

    // This is the Flink consumer that reads the program's input
    // from a Kafka topic.
    FlinkKafkaConsumer08<String> kafkaConsumer = new FlinkKafkaConsumer08<>(options.getKafkaTopic(),
            new SimpleStringSchema(), p);

    PCollection<String> words = pipeline
            .apply("StreamingWordCount", Read.from(UnboundedFlinkSource.of(kafkaConsumer)))
            .apply(ParDo.of(new ExtractWordsFn()))
            .apply(Window.<String>into(FixedWindows.of(Duration.standardSeconds(options.getWindowSize())))
                    .triggering(AfterWatermark.pastEndOfWindow()).withAllowedLateness(Duration.ZERO)
                    .discardingFiredPanes());

    PCollection<KV<String, Long>> wordCounts = words.apply(Count.<String>perElement());

    wordCounts.apply(ParDo.of(new FormatAsStringFn())).apply(TextIO.Write.to("./outputKafka.txt"));

    pipeline.run();
}

From source file:org.apache.beam.runners.flink.examples.streaming.WindowedWordCount.java

License:Apache License

public static void main(String[] args) throws IOException {
    StreamingWordCountOptions options = PipelineOptionsFactory.fromArgs(args).withValidation()
            .as(StreamingWordCountOptions.class);
    options.setStreaming(true);
    options.setWindowSize(10L);
    options.setSlide(5L);
    options.setCheckpointingInterval(1000L);
    options.setNumberOfExecutionRetries(5);
    options.setExecutionRetryDelay(3000L);
    options.setRunner(FlinkRunner.class);

    LOG.info("Windpwed WordCount with Sliding Windows of " + options.getWindowSize() + " sec. and a slide of "
            + options.getSlide());

    Pipeline pipeline = Pipeline.create(options);

    PCollection<String> words = pipeline
            .apply("StreamingWordCount", Read.from(new UnboundedSocketSource<>("localhost", 9999, '\n', 3)))
            .apply(ParDo.of(new ExtractWordsFn()))
            .apply(Window
                    .<String>into(SlidingWindows.of(Duration.standardSeconds(options.getWindowSize()))
                            .every(Duration.standardSeconds(options.getSlide())))
                    .triggering(AfterWatermark.pastEndOfWindow()).withAllowedLateness(Duration.ZERO)
                    .discardingFiredPanes());

    PCollection<KV<String, Long>> wordCounts = words.apply(Count.<String>perElement());

    wordCounts.apply(ParDo.of(new FormatAsStringFn())).apply(TextIO.Write.to("./outputWordCount.txt"));

    pipeline.run();
}

From source file:org.apache.beam.sdk.io.CountingSource.java

License:Apache License

/** Create a new {@link UnboundedCountingSource}. */
// package-private to return a typed UnboundedCountingSource rather than the UnboundedSource type.
static UnboundedCountingSource createUnboundedFrom(long start) {
    return new UnboundedCountingSource(start, 1, 1L, Duration.ZERO, new NowTimestampFn());
}
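
Here Duration.ZERO is passed as the period argument of the package-private UnboundedCountingSource constructor; judging from this factory, a zero period means the source emits elements without any throttling delay between them.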