List of usage examples for java.lang.Math.addExact
@HotSpotIntrinsicCandidate public static long addExact(long x, long y)
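Before the real-world examples, here is a minimal, self-contained sketch (not taken from any of the source files below; the class name AddExactDemo is made up) showing the basic contract of Math.addExact(long, long): the sum is returned when it fits in a long, and an ArithmeticException is thrown on overflow instead of silently wrapping the way the + operator does.

public class AddExactDemo {
    public static void main(String[] args) {
        // normal case: behaves exactly like the + operator
        System.out.println(Math.addExact(40L, 2L)); // 42

        // overflow case: + would silently wrap to Long.MIN_VALUE,
        // addExact throws ArithmeticException ("long overflow") instead
        try {
            Math.addExact(Long.MAX_VALUE, 1L);
        } catch (ArithmeticException e) {
            System.out.println("overflow detected: " + e.getMessage());
        }
    }
}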
From source file: com.hazelcast.jet.benchmark.trademonitor.FlinkTradeMonitor.java
public static void main(String[] args) throws Exception {
    if (args.length != 13) {
        System.err.println("Usage:");
        System.err.println(" " + FlinkTradeMonitor.class.getSimpleName()
                + " <bootstrap.servers> <topic> <offset-reset> <maxLagMs> <windowSizeMs> <slideByMs> <outputPath>"
                + " <checkpointInterval> <checkpointUri> <doAsyncSnapshot> <stateBackend> <kafkaParallelism> <windowParallelism>");
        System.err.println("<stateBackend> - fs | rocksDb");
        System.exit(1);
    }
    String brokerUri = args[0];
    String topic = args[1];
    String offsetReset = args[2];
    int lagMs = Integer.parseInt(args[3]);
    int windowSize = Integer.parseInt(args[4]);
    int slideBy = Integer.parseInt(args[5]);
    String outputPath = args[6];
    int checkpointInt = Integer.parseInt(args[7]);
    String checkpointUri = args[8];
    boolean doAsyncSnapshot = Boolean.parseBoolean(args[9]);
    String stateBackend = args[10];
    int kafkaParallelism = Integer.parseInt(args[11]);
    int windowParallelism = Integer.parseInt(args[12]);
    System.out.println("bootstrap.servers: " + brokerUri);
    System.out.println("topic: " + topic);
    System.out.println("offset-reset: " + offsetReset);
    System.out.println("lag: " + lagMs);
    System.out.println("windowSize: " + windowSize);
    System.out.println("slideBy: " + slideBy);
    System.out.println("outputPath: " + outputPath);
    System.out.println("checkpointInt: " + checkpointInt);
    System.out.println("checkpointUri: " + checkpointUri);
    System.out.println("doAsyncSnapshot: " + doAsyncSnapshot);
    System.out.println("stateBackend: " + stateBackend);
    System.out.println("kafkaParallelism: " + kafkaParallelism);
    System.out.println("windowParallelism: " + windowParallelism);

    // set up the execution environment
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
    if (checkpointInt > 0) {
        env.enableCheckpointing(checkpointInt);
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(checkpointInt);
    }
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 5000));
    if ("fs".equalsIgnoreCase(stateBackend)) {
        env.setStateBackend(new FsStateBackend(checkpointUri, doAsyncSnapshot));
    } else if ("rocksDb".equalsIgnoreCase(stateBackend)) {
        env.setStateBackend(new RocksDBStateBackend(checkpointUri));
    } else {
        System.err.println("Bad value for stateBackend: " + stateBackend);
        System.exit(1);
    }

    DeserializationSchema<Trade> schema = new AbstractDeserializationSchema<Trade>() {
        TradeDeserializer deserializer = new TradeDeserializer();

        @Override
        public Trade deserialize(byte[] message) throws IOException {
            return deserializer.deserialize(null, message);
        }
    };

    DataStreamSource<Trade> trades = env
            .addSource(new FlinkKafkaConsumer010<>(topic, schema, getKafkaProperties(brokerUri, offsetReset)))
            .setParallelism(kafkaParallelism);

    AssignerWithPeriodicWatermarks<Trade> timestampExtractor =
            new BoundedOutOfOrdernessTimestampExtractor<Trade>(Time.milliseconds(lagMs)) {
                @Override
                public long extractTimestamp(Trade element) {
                    return element.getTime();
                }
            };

    WindowAssigner window = windowSize == slideBy
            ? TumblingEventTimeWindows.of(Time.milliseconds(windowSize))
            : SlidingEventTimeWindows.of(Time.milliseconds(windowSize), Time.milliseconds(slideBy));

    trades.assignTimestampsAndWatermarks(timestampExtractor)
            .keyBy((Trade t) -> t.getTicker())
            .window(window)
            .aggregate(new AggregateFunction<Trade, MutableLong, Long>() {
                @Override
                public MutableLong createAccumulator() {
                    return new MutableLong();
                }

                @Override
                public MutableLong add(Trade value, MutableLong accumulator) {
                    accumulator.increment();
                    return accumulator;
                }

                @Override
                public MutableLong merge(MutableLong a, MutableLong b) {
                    // combine partial counts with overflow checking instead of a plain +
                    a.setValue(Math.addExact(a.longValue(), b.longValue()));
                    return a;
                }

                @Override
                public Long getResult(MutableLong accumulator) {
                    return accumulator.longValue();
                }
            }, new WindowFunction<Long, Tuple5<String, String, Long, Long, Long>, String, TimeWindow>() {
                @Override
                public void apply(String key, TimeWindow window, Iterable<Long> input,
                        Collector<Tuple5<String, String, Long, Long, Long>> out) throws Exception {
                    long timeMs = System.currentTimeMillis();
                    long count = input.iterator().next();
                    long latencyMs = timeMs - window.getEnd() - lagMs;
                    out.collect(new Tuple5<>(
                            Instant.ofEpochMilli(window.getEnd()).atZone(ZoneId.systemDefault())
                                    .toLocalTime().toString(),
                            key, count, timeMs, latencyMs));
                }
            })
            .setParallelism(windowParallelism)
            .writeAsCsv(outputPath, WriteMode.OVERWRITE);

    env.execute("Trade Monitor Example");
}
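In this example the call to Math.addExact sits in AggregateFunction.merge(): when Flink combines per-pane accumulators, the two partial counts are added with overflow checking, so an impossible count fails fast with an ArithmeticException instead of silently going negative. The pattern in isolation, as a minimal sketch assuming MutableLong from Apache Commons Lang 3 (the class name MergeSketch is made up):

import org.apache.commons.lang3.mutable.MutableLong;

public class MergeSketch {
    // merge two partial counts; throws ArithmeticException if the sum exceeds Long.MAX_VALUE
    static MutableLong merge(MutableLong a, MutableLong b) {
        a.setValue(Math.addExact(a.longValue(), b.longValue()));
        return a;
    }

    public static void main(String[] args) {
        System.out.println(merge(new MutableLong(3), new MutableLong(4))); // 7
    }
}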
From source file: io.prestosql.operator.scalar.MathFunctions.java
@Description("The bucket number of a value given a lower and upper bound and the number of buckets")
@ScalarFunction("width_bucket")
@SqlType(StandardTypes.BIGINT)
public static long widthBucket(@SqlType(StandardTypes.DOUBLE) double operand,
        @SqlType(StandardTypes.DOUBLE) double bound1,
        @SqlType(StandardTypes.DOUBLE) double bound2,
        @SqlType(StandardTypes.BIGINT) long bucketCount)
{
    checkCondition(bucketCount > 0, INVALID_FUNCTION_ARGUMENT, "bucketCount must be greater than 0");
    checkCondition(!isNaN(operand), INVALID_FUNCTION_ARGUMENT, "operand must not be NaN");
    checkCondition(isFinite(bound1), INVALID_FUNCTION_ARGUMENT, "first bound must be finite");
    checkCondition(isFinite(bound2), INVALID_FUNCTION_ARGUMENT, "second bound must be finite");
    checkCondition(bound1 != bound2, INVALID_FUNCTION_ARGUMENT, "bounds cannot equal each other");

    long result;
    double lower = Math.min(bound1, bound2);
    double upper = Math.max(bound1, bound2);

    if (operand < lower) {
        result = 0;
    }
    else if (operand >= upper) {
        try {
            // overflow-checked bucketCount + 1 for values at or above the upper bound
            result = Math.addExact(bucketCount, 1);
        }
        catch (ArithmeticException e) {
            throw new PrestoException(NUMERIC_VALUE_OUT_OF_RANGE,
                    format("Bucket for value %s is out of range", operand));
        }
    }
    else {
        result = (long) ((double) bucketCount * (operand - lower) / (upper - lower) + 1);
    }

    if (bound1 > bound2) {
        result = (bucketCount - result) + 1;
    }

    return result;
}
From source file: com.facebook.presto.operator.scalar.MathFunctions.java
@Description("The bucket number of a value given a lower and upper bound and the number of buckets")
@ScalarFunction("width_bucket")
@SqlType(StandardTypes.BIGINT)
public static long widthBucket(@SqlType(StandardTypes.DOUBLE) double operand,
        @SqlType(StandardTypes.DOUBLE) double bound1,
        @SqlType(StandardTypes.DOUBLE) double bound2,
        @SqlType(StandardTypes.BIGINT) long bucketCount)
{
    checkCondition(bucketCount > 0, INVALID_FUNCTION_ARGUMENT, "bucketCount must be greater than 0");
    checkCondition(!isNaN(operand), INVALID_FUNCTION_ARGUMENT, "operand must not be NaN");
    checkCondition(isFinite(bound1), INVALID_FUNCTION_ARGUMENT, "first bound must be finite");
    checkCondition(isFinite(bound2), INVALID_FUNCTION_ARGUMENT, "second bound must be finite");
    checkCondition(bound1 != bound2, INVALID_FUNCTION_ARGUMENT, "bounds cannot equal each other");

    long result = 0;
    double lower = Math.min(bound1, bound2);
    double upper = Math.max(bound1, bound2);

    if (operand < lower) {
        result = 0;
    }
    else if (operand >= upper) {
        try {
            result = Math.addExact(bucketCount, 1);
        }
        catch (ArithmeticException e) {
            throw new PrestoException(NUMERIC_VALUE_OUT_OF_RANGE,
                    format("Bucket for value %s is out of range", operand));
        }
    }
    else {
        result = (long) ((double) bucketCount * (operand - lower) / (upper - lower) + 1);
    }

    if (bound1 > bound2) {
        result = (bucketCount - result) + 1;
    }

    return result;
}
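Both Presto variants use Math.addExact(bucketCount, 1) for the overflow bucket: when the operand is at or above the upper bound, the result must be bucketCount + 1, and the checked add turns a bucketCount of Long.MAX_VALUE into a catchable ArithmeticException rather than a silently wrapped negative bucket number. A stripped-down sketch of just that guard, with hypothetical names and a plain IllegalStateException standing in for the Presto exception types:

public class WidthBucketGuardSketch {
    // hypothetical standalone version of the overflow guard used by width_bucket above
    static long overflowBucket(long bucketCount, double operand) {
        try {
            return Math.addExact(bucketCount, 1); // checked bucketCount + 1
        } catch (ArithmeticException e) {
            throw new IllegalStateException(
                    String.format("Bucket for value %s is out of range", operand), e);
        }
    }

    public static void main(String[] args) {
        System.out.println(overflowBucket(4, 99.0));              // 5
        System.out.println(overflowBucket(Long.MAX_VALUE, 99.0)); // throws IllegalStateException
    }
}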