List of usage examples for java.lang.Long.MIN_VALUE

public static final long MIN_VALUE
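Long.MIN_VALUE is the smallest value a long can hold: -2^63, i.e. -9223372036854775808. Because no legitimate offset, timestamp, or measurement normally takes that value, the examples below all use it as a sentinel for "unset" or "no data". A minimal standalone sketch of the constant and its one well-known caveat (negation overflows, since Long.MAX_VALUE is only 2^63 - 1):

public class LongMinValueDemo {
    public static void main(String[] args) {
        System.out.println(Long.MIN_VALUE);           // -9223372036854775808
        // No positive counterpart exists, so negation and Math.abs
        // overflow back to Long.MIN_VALUE itself.
        System.out.println(-Long.MIN_VALUE);          // -9223372036854775808
        System.out.println(Math.abs(Long.MIN_VALUE)); // -9223372036854775808
    }
}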
From source file: org.apache.crunch.kafka.inputformat.KafkaInputFormat.java

/**
 * Reads the {@code configuration} to determine which topics, partitions, and offsets should be used for reading data.
 *
 * @param configuration the configuration to derive the data to read.
 * @return a map of {@link TopicPartition} to a pair of start and end offsets.
 * @throws IllegalStateException if the {@code configuration} does not have the start and end offsets set properly
 *                               for a partition.
 */
public static Map<TopicPartition, Pair<Long, Long>> getOffsets(Configuration configuration) {
    Map<TopicPartition, Pair<Long, Long>> offsets = new HashMap<>();

    // find configuration for all of the topics with defined partitions
    Map<String, String> topicPartitionKeys = configuration.getValByRegex(TOPIC_KEY_REGEX);

    // for each topic, process its partitions
    for (String key : topicPartitionKeys.keySet()) {
        String topic = getTopicFromKey(key);
        int[] partitions = configuration.getInts(key);

        // for each partition, find and add the start/end offset;
        // Long.MIN_VALUE serves as the "not configured" default
        for (int partitionId : partitions) {
            TopicPartition topicPartition = new TopicPartition(topic, partitionId);
            long start = configuration.getLong(generatePartitionStartKey(topic, partitionId), Long.MIN_VALUE);
            long end = configuration.getLong(generatePartitionEndKey(topic, partitionId), Long.MIN_VALUE);

            if (start == Long.MIN_VALUE || end == Long.MIN_VALUE) {
                throw new IllegalStateException("The " + topicPartition + " has an invalid start:" + start
                        + " or end:" + end + " offset configured.");
            }

            offsets.put(topicPartition, Pair.of(start, end));
        }
    }

    return offsets;
}
From source file: com.baidu.oped.apm.common.buffer.AutomaticBufferTest.java

@Test
public void testPutSVarLong() throws Exception {
    Buffer buffer = new AutomaticBuffer(32);
    buffer.putSVar(Long.MAX_VALUE);
    buffer.putSVar(Long.MIN_VALUE);
    buffer.putSVar(0L);
    buffer.putSVar(1L);
    buffer.putSVar(12345L);

    buffer.setOffset(0);
    Assert.assertEquals(buffer.readSVarLong(), Long.MAX_VALUE);
    Assert.assertEquals(buffer.readSVarLong(), Long.MIN_VALUE);
    Assert.assertEquals(buffer.readSVarLong(), 0L);
    Assert.assertEquals(buffer.readSVarLong(), 1L);
    Assert.assertEquals(buffer.readSVarLong(), 12345L);
}
From source file: org.apache.flink.hdfstests.ContinuousFileProcessingFrom12MigrationTest.java

/**
 * Manually run this to write binary snapshot data. Remove @Ignore to run.
 */
@Ignore
@Test
public void writeMonitoringSourceSnapshot() throws Exception {
    File testFolder = tempFolder.newFolder();

    long fileModTime = Long.MIN_VALUE;
    for (int i = 0; i < 1; i++) {
        Tuple2<File, String> file = createFileAndFillWithData(testFolder, "file", i, "This is test line.");
        fileModTime = file.f0.lastModified();
    }

    TextInputFormat format = new TextInputFormat(new Path(testFolder.getAbsolutePath()));

    final ContinuousFileMonitoringFunction<String> monitoringFunction = new ContinuousFileMonitoringFunction<>(
            format, FileProcessingMode.PROCESS_CONTINUOUSLY, 1, INTERVAL);

    StreamSource<TimestampedFileInputSplit, ContinuousFileMonitoringFunction<String>> src = new StreamSource<>(
            monitoringFunction);

    final AbstractStreamOperatorTestHarness<TimestampedFileInputSplit> testHarness =
            new AbstractStreamOperatorTestHarness<>(src, 1, 1, 0);

    testHarness.open();

    final Throwable[] error = new Throwable[1];
    final OneShotLatch latch = new OneShotLatch();

    // run the source asynchronously
    Thread runner = new Thread() {
        @Override
        public void run() {
            try {
                monitoringFunction.run(new DummySourceContext() {
                    @Override
                    public void collect(TimestampedFileInputSplit element) {
                        latch.trigger();
                    }

                    @Override
                    public void markAsTemporarilyIdle() {
                    }
                });
            } catch (Throwable t) {
                t.printStackTrace();
                error[0] = t;
            }
        }
    };
    runner.start();

    if (!latch.isTriggered()) {
        latch.await();
    }

    final OperatorStateHandles snapshot;
    synchronized (testHarness.getCheckpointLock()) {
        snapshot = testHarness.snapshot(0L, 0L);
    }

    OperatorSnapshotUtil.writeStateHandle(snapshot,
            "src/test/resources/monitoring-function-migration-test-" + fileModTime + "-flink1.2-snapshot");

    monitoringFunction.cancel();
    runner.join();

    testHarness.close();
}
From source file: com.android.deskclock.data.TimerModel.java

/**
 * @param length         the length of the timer in milliseconds
 * @param label          describes the purpose of the timer
 * @param deleteAfterUse {@code true} indicates the timer should be deleted when it is reset
 * @return the newly added timer
 */
Timer addTimer(long length, String label, boolean deleteAfterUse) {
    // Create the timer instance; Long.MIN_VALUE marks the last start time of a
    // timer that has never been started.
    Timer timer = new Timer(-1, RESET, length, length, Long.MIN_VALUE, length, label, deleteAfterUse);

    // Add the timer to permanent storage.
    timer = TimerDAO.addTimer(mContext, timer);

    // Add the timer to the cache.
    getMutableTimers().add(0, timer);

    // Update the timer notification.
    updateNotification();
    // Heads-Up notification is unaffected by this change

    // Notify listeners of the change.
    for (TimerListener timerListener : mTimerListeners) {
        timerListener.timerAdded(timer);
    }

    return timer;
}
From source file: org.apache.crunch.kafka.record.KafkaInputFormat.java

/**
 * Reads the {@code configuration} to determine which topics, partitions, and offsets should be used for reading data.
 *
 * @param configuration the configuration to derive the data to read.
 * @return a map of {@link TopicPartition} to a pair of start and end offsets.
 * @throws IllegalStateException if the {@code configuration} does not have the start and end offsets set properly
 *                               for a partition.
 */
public static Map<TopicPartition, Pair<Long, Long>> getOffsets(Configuration configuration) {
    Map<TopicPartition, Pair<Long, Long>> offsets = new HashMap<>();

    // find configuration for all of the topics with defined partitions
    Map<String, String> topicPartitionKeys = configuration.getValByRegex(TOPIC_KEY_REGEX);

    // for each topic, process its partitions
    for (String key : topicPartitionKeys.keySet()) {
        String topic = getTopicFromKey(key);
        int[] partitions = configuration.getInts(key);

        // for each partition, find and add the start/end offset;
        // Long.MIN_VALUE serves as the "not configured" default
        for (int partitionId : partitions) {
            TopicPartition topicPartition = new TopicPartition(topic, partitionId);
            long start = configuration.getLong(generatePartitionStartKey(topic, partitionId), Long.MIN_VALUE);
            long end = configuration.getLong(generatePartitionEndKey(topic, partitionId), Long.MIN_VALUE);

            if (start == Long.MIN_VALUE || end == Long.MIN_VALUE) {
                throw new IllegalStateException("The " + topicPartition + " has an invalid start:" + start
                        + " or end:" + end + " offset configured.");
            }

            offsets.put(topicPartition, Pair.of(start, end));
        }
    }

    return offsets;
}
From source file: com.onyx.deskclock.deskclock.data.TimerModel.java

/**
 * @param length         the length of the timer in milliseconds
 * @param label          describes the purpose of the timer
 * @param deleteAfterUse {@code true} indicates the timer should be deleted when it is reset
 * @return the newly added timer
 */
Timer addTimer(long length, String label, boolean deleteAfterUse) {
    // Create the timer instance.
    Timer timer = new Timer(-1, Timer.State.RESET, length, length, Long.MIN_VALUE, length, label,
            deleteAfterUse);

    // Add the timer to permanent storage.
    timer = TimerDAO.addTimer(mContext, timer);

    // Add the timer to the cache.
    getMutableTimers().add(0, timer);

    // Update the timer notification.
    updateNotification();
    // Heads-Up notification is unaffected by this change

    // Notify listeners of the change.
    for (TimerListener timerListener : mTimerListeners) {
        timerListener.timerAdded(timer);
    }

    return timer;
}
From source file: com.navercorp.pinpoint.common.buffer.AutomaticBufferTest.java

@Test
public void testPutSVLong() throws Exception {
    Buffer buffer = new AutomaticBuffer(32);
    buffer.putSVLong(Long.MAX_VALUE);
    buffer.putSVLong(Long.MIN_VALUE);
    buffer.putSVLong(0L);
    buffer.putSVLong(1L);
    buffer.putSVLong(12345L);

    buffer.setOffset(0);
    Assert.assertEquals(buffer.readSVLong(), Long.MAX_VALUE);
    Assert.assertEquals(buffer.readSVLong(), Long.MIN_VALUE);
    Assert.assertEquals(buffer.readSVLong(), 0L);
    Assert.assertEquals(buffer.readSVLong(), 1L);
    Assert.assertEquals(buffer.readSVLong(), 12345L);
}
From source file: com.navercorp.pinpoint.flink.mapper.thrift.stat.JoinAgentStatBoMapper.java

private long getTimeStamp(JoinAgentStatBo joinAgentStatBo) {
    List<JoinCpuLoadBo> joinCpuLoadBoList = joinAgentStatBo.getJoinCpuLoadBoList();
    if (joinCpuLoadBoList.size() != 0) {
        return joinCpuLoadBoList.get(0).getTimestamp();
    }

    List<JoinMemoryBo> joinMemoryBoList = joinAgentStatBo.getJoinMemoryBoList();
    if (joinMemoryBoList.size() != 0) {
        return joinMemoryBoList.get(0).getTimestamp();
    }

    List<JoinTransactionBo> joinTransactionBoList = joinAgentStatBo.getJoinTransactionBoList();
    if (joinTransactionBoList.size() != 0) {
        return joinTransactionBoList.get(0).getTimestamp();
    }

    List<JoinActiveTraceBo> joinActiveTraceBoList = joinAgentStatBo.getJoinActiveTraceBoList();
    if (joinActiveTraceBoList.size() != 0) {
        return joinActiveTraceBoList.get(0).getTimestamp();
    }

    List<JoinResponseTimeBo> joinResponseTimeBoList = joinAgentStatBo.getJoinResponseTimeBoList();
    if (joinResponseTimeBoList.size() != 0) {
        return joinResponseTimeBoList.get(0).getTimestamp();
    }

    List<JoinDataSourceListBo> joinDataSourceListBoList = joinAgentStatBo.getJoinDataSourceListBoList();
    if (joinDataSourceListBoList.size() != 0) {
        return joinDataSourceListBoList.get(0).getTimestamp();
    }

    List<JoinFileDescriptorBo> joinFileDescriptorBoList = joinAgentStatBo.getJoinFileDescriptorBoList();
    if (joinFileDescriptorBoList.size() != 0) {
        return joinFileDescriptorBoList.get(0).getTimestamp();
    }

    List<JoinDirectBufferBo> joinDirectBufferBoList = joinAgentStatBo.getJoinDirectBufferBoList();
    if (joinDirectBufferBoList.size() != 0) {
        return joinDirectBufferBoList.get(0).getTimestamp();
    }

    // no stat data at all: Long.MIN_VALUE signals "no timestamp available"
    return Long.MIN_VALUE;
}
From source file: Counter.java

/** Prints the results. */
public void printResults(PrintWriter out, String uri, long time, long memory, boolean tagginess,
        int repetition) {
    // filename.xml: 631 ms (4 elems, 0 attrs, 78 spaces, 0 chars)
    out.print(uri);
    out.print(": ");
    if (repetition == 1) {
        out.print(time);
    } else {
        out.print(time);
        out.print('/');
        out.print(repetition);
        out.print('=');
        out.print(time / repetition);
    }
    out.print(" ms");
    if (memory != Long.MIN_VALUE) {
        out.print(", ");
        out.print(memory);
        out.print(" bytes");
    }
    out.print(" (");
    out.print(fElements);
    out.print(" elems, ");
    out.print(fAttributes);
    out.print(" attrs, ");
    out.print(fIgnorableWhitespace);
    out.print(" spaces, ");
    out.print(fCharacters);
    out.print(" chars)");
    if (tagginess) {
        out.print(' ');
        long totalCharacters = fTagCharacters + fOtherCharacters + fCharacters + fIgnorableWhitespace;
        long tagValue = fTagCharacters * 100 / totalCharacters;
        out.print(tagValue);
        out.print("% tagginess");
    }
    out.println();
    out.flush();
}
From source file: com.amalto.core.storage.hibernate.LuceneQueryGenerator.java

@Override
public Query visit(Range range) {
    if (range.getExpression() instanceof MetadataField) {
        if (range.getExpression() instanceof Timestamp) {
            Timestamp field = (Timestamp) range.getExpression();
            field.accept(this);
        } else {
            MetadataField field = (MetadataField) range.getExpression();
            field.getProjectionExpression().accept(this);
        }
    }

    range.getStart().accept(this);
    // Long.MIN_VALUE / Long.MAX_VALUE mean "unbounded"; a null bound makes the
    // Lucene range query open-ended on that side.
    Long currentRangeStart = ((Long) currentValue) == Long.MIN_VALUE ? null : (Long) currentValue;
    range.getEnd().accept(this);
    Long currentRangeEnd = ((Long) currentValue) == Long.MAX_VALUE ? null : (Long) currentValue;

    return NumericRangeQuery.newLongRange(currentFieldName, currentRangeStart, currentRangeEnd, true, true);
}
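The common thread in the examples above is Long.MIN_VALUE as a "not set" marker: pass it as the default when reading a value, then test for it before trusting the result. A condensed sketch of that pattern; the names (lookupOffset, requireOffset, config) are illustrative and not taken from any source file above:

import java.util.Map;

class SentinelLookup {

    /** Returns the offset stored under {@code key}, or Long.MIN_VALUE when absent. */
    static long lookupOffset(Map<String, Long> config, String key) {
        return config.getOrDefault(key, Long.MIN_VALUE);
    }

    /** Returns the offset stored under {@code key}, failing fast when it is missing. */
    static long requireOffset(Map<String, Long> config, String key) {
        long offset = lookupOffset(config, key);
        if (offset == Long.MIN_VALUE) {
            // Do not let the sentinel leak out as if it were a real offset.
            throw new IllegalStateException("No offset configured for " + key);
        }
        return offset;
    }
}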