Usage examples for java.time.Duration.toMillis()
public long toMillis()
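toMillis() converts the whole duration to a total length in milliseconds, throwing ArithmeticException if the result overflows a long. Before the examples pulled from real projects below, here is a minimal self-contained sketch (class and variable names are illustrative, not taken from any of the sources):

import java.time.Duration;

public class ToMillisExample {
    public static void main(String[] args) {
        Duration timeout = Duration.ofSeconds(5);
        long millis = timeout.toMillis(); // 5000
        System.out.println(millis);

        // toMillis() throws ArithmeticException when the duration
        // exceeds Long.MAX_VALUE milliseconds:
        try {
            Duration.ofSeconds(Long.MAX_VALUE).toMillis();
        } catch (ArithmeticException expected) {
            System.out.println("overflow: " + expected.getMessage());
        }
    }
}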
From source file:io.pravega.segmentstore.server.reading.StorageReaderTests.java
/**
 * Tests the ability to queue dependent reads (subsequent reads that only want to read a part of a previous read).
 * Test this both with successful and failed reads.
 */
@Test
public void testDependents() {
    final Duration waitTimeout = Duration.ofSeconds(5);
    TestStorage storage = new TestStorage();
    CompletableFuture<Integer> signal = new CompletableFuture<>();
    AtomicBoolean wasReadInvoked = new AtomicBoolean();
    storage.readImplementation = () -> {
        if (wasReadInvoked.getAndSet(true)) {
            Assert.fail("Read was invoked multiple times, which is a likely indicator that the requests were not chained.");
        }
        return signal;
    };

    @Cleanup
    StorageReader reader = new StorageReader(SEGMENT_METADATA, storage, executorService());

    // Create some reads.
    CompletableFuture<StorageReader.Result> c1 = new CompletableFuture<>();
    CompletableFuture<StorageReader.Result> c2 = new CompletableFuture<>();
    reader.execute(new StorageReader.Request(0, 100, c1::complete, c1::completeExceptionally, TIMEOUT));
    reader.execute(new StorageReader.Request(50, 100, c2::complete, c2::completeExceptionally, TIMEOUT));

    Assert.assertFalse("One or more of the reads has completed prematurely.", c1.isDone() || c2.isDone());
    signal.completeExceptionally(new IntentionalException());
    AssertExtensions.assertThrows("The first read was not failed with the correct exception.",
            () -> c1.get(waitTimeout.toMillis(), TimeUnit.MILLISECONDS),
            ex -> ex instanceof IntentionalException);
    AssertExtensions.assertThrows("The second read was not failed with the correct exception.",
            () -> c2.get(waitTimeout.toMillis(), TimeUnit.MILLISECONDS),
            ex -> ex instanceof IntentionalException);
}
From source file:io.pravega.segmentstore.server.reading.StorageReadManagerTests.java
/**
 * Tests the ability to queue dependent reads (subsequent reads that only want to read a part of a previous read).
 * Test this both with successful and failed reads.
 */
@Test
public void testDependents() {
    final Duration waitTimeout = Duration.ofSeconds(5);
    TestStorage storage = new TestStorage();
    CompletableFuture<Integer> signal = new CompletableFuture<>();
    AtomicBoolean wasReadInvoked = new AtomicBoolean();
    storage.readImplementation = () -> {
        if (wasReadInvoked.getAndSet(true)) {
            Assert.fail("Read was invoked multiple times, which is a likely indicator that the requests were not chained.");
        }
        return signal;
    };

    @Cleanup
    StorageReadManager reader = new StorageReadManager(SEGMENT_METADATA, storage, executorService());

    // Create some reads.
    CompletableFuture<StorageReadManager.Result> c1 = new CompletableFuture<>();
    CompletableFuture<StorageReadManager.Result> c2 = new CompletableFuture<>();
    reader.execute(new StorageReadManager.Request(0, 100, c1::complete, c1::completeExceptionally, TIMEOUT));
    reader.execute(new StorageReadManager.Request(50, 100, c2::complete, c2::completeExceptionally, TIMEOUT));

    Assert.assertFalse("One or more of the reads has completed prematurely.", c1.isDone() || c2.isDone());
    signal.completeExceptionally(new IntentionalException());
    AssertExtensions.assertThrows("The first read was not failed with the correct exception.",
            () -> c1.get(waitTimeout.toMillis(), TimeUnit.MILLISECONDS),
            ex -> ex instanceof IntentionalException);
    AssertExtensions.assertThrows("The second read was not failed with the correct exception.",
            () -> c2.get(waitTimeout.toMillis(), TimeUnit.MILLISECONDS),
            ex -> ex instanceof IntentionalException);
}
From source file:net.di2e.ecdr.describe.generator.DescribeGeneratorImpl.java
protected void setRecordRate(MetricsType metrics, Map<String, TemporalCoverageHolder> timeMap) {
    TemporalCoverageHolder tc = timeMap.containsKey("modified") ? timeMap.get("modified")
            : (timeMap.containsKey("effective") ? timeMap.get("effective") : timeMap.get("created"));
    try {
        if (tc != null) {
            Date startDate = tc.getStartDate();
            if (startDate != null) {
                long totalHits = metrics.getCount();
                LocalDateTime start = LocalDateTime.ofInstant(startDate.toInstant(), ZoneId.systemDefault());
                Duration duration = Duration.between(start, LocalDateTime.now());
                RecordRateType rate = new RecordRateType();
                metrics.setRecordRate(rate);
                long dur = totalHits / duration.toHours();
                if (dur < 15L) {
                    dur = totalHits / duration.toDays();
                    if (dur < 4L) {
                        dur = totalHits * 30L / duration.toDays();
                        if (dur < 10L) {
                            dur = totalHits * 365L / duration.toDays();
                            rate.setFrequency("Yearly");
                        } else {
                            rate.setFrequency("Monthly");
                        }
                    } else {
                        rate.setFrequency("Daily");
                    }
                } else if (totalHits > 1000L) {
                    dur = duration.toMinutes();
                    // Note: this inner check repeats the enclosing condition, so it is always
                    // true here and the "Minute" branch below is unreachable as written.
                    if (totalHits > 1000L) {
                        dur = duration.toMillis() / 1000L;
                        rate.setFrequency("Second");
                    } else {
                        rate.setFrequency("Minute");
                    }
                } else {
                    rate.setFrequency("Hourly");
                }
                rate.setValue((int) dur);
            }
        }
    } catch (Exception e) {
        LOGGER.warn("Could not set record rate: {}", e.getMessage(), e);
    }
}
From source file:com.joyent.manta.client.multipart.JobsMultipartManager.java
/**
 * Waits for a multipart upload to complete, polling at a set interval.
 *
 * @param <R> return type of executeWhenTimesToPollExceeded
 * @param upload multipart upload object
 * @param pingInterval interval to poll
 * @param timesToPoll number of times to poll Manta to check for completion
 * @param executeWhenTimesToPollExceeded lambda executed when timesToPoll has been exceeded
 * @return null when under poll timeout, otherwise returns return value of executeWhenTimesToPollExceeded
 * @throws IOException thrown if there is a problem connecting to Manta
 */
public <R> R waitForCompletion(final MantaMultipartUpload upload, final Duration pingInterval,
                               final int timesToPoll, final Function<UUID, R> executeWhenTimesToPollExceeded)
        throws IOException {
    if (timesToPoll <= 0) {
        String msg = String.format("times to poll should be set to a value greater than 0. "
                + "Actual value: %d", timesToPoll);
        throw new IllegalArgumentException(msg);
    }

    final String dir = multipartUploadDir(upload.getId());
    final MantaJob job = findJob(upload);

    if (job == null) {
        String msg = "Unable to find job associated with multipart upload. "
                + "Was complete() run for upload or was it run so long ago "
                + "that we no longer have a record for it?";
        MantaMultipartException e = new MantaMultipartException(msg);
        e.setContextValue("upload_id", upload.getId().toString());
        e.setContextValue("upload_directory", dir);
        // Note: job is null on this path, so no job_id context can be attached
        // (the original dereferenced job here, which would throw a NullPointerException).
        throw e;
    }

    final long waitMillis = pingInterval.toMillis();
    int timesPolled;

    /* We ping the upload directory and wait for it to be deleted because
     * there is the chance for a race condition when the job attempts to
     * delete the upload directory, but isn't finished. */
    for (timesPolled = 0; timesPolled < timesToPoll; timesPolled++) {
        try {
            final MantaMultipartStatus status = getStatus(upload, job.getId());

            // We do a check preemptively because we shouldn't sleep unless we need to
            if (status.equals(MantaMultipartStatus.COMPLETED)) {
                return null;
            }

            if (status.equals(MantaMultipartStatus.ABORTED)) {
                String msg = "Manta job backing multipart upload was aborted. "
                        + "This upload was unable to be completed.";
                MantaMultipartException e = new MantaMultipartException(msg);
                e.setContextValue("upload_id", upload.getId().toString());
                e.setContextValue("upload_directory", dir);
                e.setContextValue("job_id", job.getId().toString());
                throw e;
            }

            if (status.equals(MantaMultipartStatus.UNKNOWN)) {
                String msg = "Manta job backing multipart upload is in "
                        + "an unknown state. Typically this means that we "
                        + "are unable to get the status of the job backing "
                        + "the multipart upload.";
                MantaMultipartException e = new MantaMultipartException(msg);
                e.setContextValue("upload_id", upload.getId().toString());
                e.setContextValue("upload_directory", dir);
                e.setContextValue("job_id", job.getId().toString());
                throw e;
            }

            // Don't bother to sleep if we won't be doing a check.
            // (As written this condition is always true inside the loop; the
            // apparent intent was to skip the sleep on the final iteration.)
            if (timesPolled < timesToPoll + 1) {
                if (LOGGER.isDebugEnabled()) {
                    LOGGER.debug("Waiting for [{}] ms for upload [{}] to complete (try {} of {})",
                            waitMillis, upload.getId(), timesPolled + 1, timesToPoll);
                }
                Thread.sleep(waitMillis);
            }
        } catch (InterruptedException e) {
            /* We assume the client has written logic for when the polling operation
             * doesn't complete within the time period as expected, and we also make
             * the assumption that that behavior would be acceptable when the thread
             * has been interrupted. */
            return executeWhenTimesToPollExceeded.apply(upload.getId());
        }
    }

    if (timesPolled >= timesToPoll) {
        return executeWhenTimesToPollExceeded.apply(upload.getId());
    }

    return null;
}
From source file:org.graylog2.bindings.providers.JestClientProvider.java
@Inject
public JestClientProvider(@Named("elasticsearch_hosts") List<URI> elasticsearchHosts,
        @Named("elasticsearch_connect_timeout") Duration elasticsearchConnectTimeout,
        @Named("elasticsearch_socket_timeout") Duration elasticsearchSocketTimeout,
        @Named("elasticsearch_idle_timeout") Duration elasticsearchIdleTimeout,
        @Named("elasticsearch_max_total_connections") int elasticsearchMaxTotalConnections,
        @Named("elasticsearch_max_total_connections_per_route") int elasticsearchMaxTotalConnectionsPerRoute,
        @Named("elasticsearch_discovery_enabled") boolean discoveryEnabled,
        @Named("elasticsearch_discovery_filter") @Nullable String discoveryFilter,
        @Named("elasticsearch_discovery_frequency") Duration discoveryFrequency,
        Gson gson) {
    this.factory = new JestClientFactory();
    this.credentialsProvider = new BasicCredentialsProvider();
    final List<String> hosts = elasticsearchHosts.stream().map(hostUri -> {
        if (!Strings.isNullOrEmpty(hostUri.getUserInfo())) {
            final Iterator<String> splittedUserInfo = Splitter.on(":").split(hostUri.getUserInfo()).iterator();
            if (splittedUserInfo.hasNext()) {
                final String username = splittedUserInfo.next();
                final String password = splittedUserInfo.hasNext() ? splittedUserInfo.next() : null;
                credentialsProvider.setCredentials(
                        new AuthScope(hostUri.getHost(), hostUri.getPort(), AuthScope.ANY_REALM, hostUri.getScheme()),
                        new UsernamePasswordCredentials(username, password));
            }
        }
        return hostUri.toString();
    }).collect(Collectors.toList());

    final HttpClientConfig.Builder httpClientConfigBuilder = new HttpClientConfig.Builder(hosts)
            .credentialsProvider(credentialsProvider)
            .connTimeout(Math.toIntExact(elasticsearchConnectTimeout.toMillis()))
            .readTimeout(Math.toIntExact(elasticsearchSocketTimeout.toMillis()))
            .maxConnectionIdleTime(elasticsearchIdleTimeout.getSeconds(), TimeUnit.SECONDS)
            .maxTotalConnection(elasticsearchMaxTotalConnections)
            .defaultMaxTotalConnectionPerRoute(elasticsearchMaxTotalConnectionsPerRoute)
            .multiThreaded(true)
            .discoveryEnabled(discoveryEnabled)
            .discoveryFilter(discoveryFilter)
            .discoveryFrequency(discoveryFrequency.getSeconds(), TimeUnit.SECONDS)
            .gson(gson);

    factory.setHttpClientConfig(httpClientConfigBuilder.build());
}
From source file:dk.dma.vessel.track.store.AisStoreClient.java
public List<PastTrackPos> getPastTrack(int mmsi, Integer minDist, Duration age) {
    // Determine URL
    age = age != null ? age : Duration.parse(pastTrackTtl);
    minDist = minDist == null ? Integer.valueOf(pastTrackMinDist) : minDist;
    ZonedDateTime now = ZonedDateTime.now();
    String from = now.format(DateTimeFormatter.ISO_INSTANT);
    ZonedDateTime end = now.minus(age);
    String to = end.format(DateTimeFormatter.ISO_INSTANT);
    String interval = String.format("%s/%s", to, from);
    String url = String.format("%s?mmsi=%d&interval=%s", aisViewUrl, mmsi, interval);

    final List<PastTrackPos> track = new ArrayList<>();
    try {
        long t0 = System.currentTimeMillis();

        // TEST
        url = url + "&filter=" + URLEncoder.encode("(s.country not in (GBR)) & (s.region!=808)", "UTF-8");

        // Set up a few timeouts and fetch the attachment
        URLConnection con = new URL(url).openConnection();
        con.setConnectTimeout(10 * 1000); // 10 seconds
        con.setReadTimeout(60 * 1000); // 1 minute

        if (!StringUtils.isEmpty(aisAuthHeader)) {
            con.setRequestProperty("Authorization", aisAuthHeader);
        }

        try (InputStream in = con.getInputStream(); BufferedInputStream bin = new BufferedInputStream(in)) {
            AisReader aisReader = AisReaders.createReaderFromInputStream(bin);
            aisReader.registerPacketHandler(new Consumer<AisPacket>() {
                @Override
                public void accept(AisPacket p) {
                    AisMessage message = p.tryGetAisMessage();
                    if (message == null || !(message instanceof IVesselPositionMessage)) {
                        return;
                    }
                    VesselTarget target = new VesselTarget();
                    target.merge(p, message);
                    if (!target.checkValidPos()) {
                        return;
                    }
                    track.add(new PastTrackPos(target.getLat(), target.getLon(), target.getCog(),
                            target.getSog(), target.getLastPosReport()));
                }
            });
            aisReader.start();
            try {
                aisReader.join();
            } catch (InterruptedException e) {
                return null;
            }
        }
        LOG.info(String.format("Read %d past track positions in %d ms", track.size(),
                System.currentTimeMillis() - t0));
    } catch (IOException e) {
        LOG.error("Failed to make REST query: " + url);
        throw new InternalError("REST endpoint failed");
    }
    LOG.info("AisStore returned track with " + track.size() + " points");
    return PastTrack.downSample(track, minDist, age.toMillis());
}
From source file:org.apache.flink.table.runtime.window.assigners.SlidingWindowAssigner.java
/**
 * Creates a new {@code SlidingEventTimeWindows} {@link org.apache.flink.streaming.api.windowing.assigners.WindowAssigner}
 * that assigns elements to sliding time windows based on the element timestamp.
 *
 * @param size The size of the generated windows.
 * @param slide The slide interval of the generated windows.
 * @return The time policy.
 */
public static SlidingWindowAssigner of(Duration size, Duration slide) {
    return new SlidingWindowAssigner(size.toMillis(), slide.toMillis(), 0, true);
}
From source file:org.apache.flink.table.runtime.window.assigners.SlidingWindowAssigner.java
public SlidingWindowAssigner withOffset(Duration offset) {
    return new SlidingWindowAssigner(size, slide, offset.toMillis(), isEventTime);
}
From source file:org.apache.james.mailbox.tika.CachingTextExtractor.java
public CachingTextExtractor(TextExtractor underlying, Duration cacheEvictionPeriod, Long cacheWeightInBytes,
        MetricFactory metricFactory, GaugeRegistry gaugeRegistry) {
    this.underlying = underlying;
    this.weightMetric = metricFactory.generate("textExtractor.cache.weight");

    Weigher<String, ParsedContent> weigher = (key, parsedContent) -> computeWeight(parsedContent);
    RemovalListener<String, ParsedContent> removalListener = notification -> Optional
            .ofNullable(notification.getValue()).map(this::computeWeight).ifPresent(weightMetric::remove);

    this.cache = CacheBuilder.newBuilder()
            .expireAfterAccess(cacheEvictionPeriod.toMillis(), TimeUnit.MILLISECONDS)
            .maximumWeight(cacheWeightInBytes)
            .weigher(weigher)
            .recordStats()
            .removalListener(removalListener)
            .build();
    recordStats(gaugeRegistry);
}
From source file:org.apache.nifi.avro.AvroTypeUtil.java
@SuppressWarnings("unchecked")
private static Object convertToAvroObject(final Object rawValue, final Schema fieldSchema,
        final String fieldName, final Charset charset) {
    if (rawValue == null) {
        return null;
    }

    switch (fieldSchema.getType()) {
    case INT: {
        final LogicalType logicalType = fieldSchema.getLogicalType();
        if (logicalType == null) {
            return DataTypeUtils.toInteger(rawValue, fieldName);
        }

        if (LOGICAL_TYPE_DATE.equals(logicalType.getName())) {
            final String format = AvroTypeUtil.determineDataType(fieldSchema).getFormat();
            final Date date = DataTypeUtils.toDate(rawValue, () -> DataTypeUtils.getDateFormat(format), fieldName);
            final Duration duration = Duration.between(new Date(0L).toInstant(), new Date(date.getTime()).toInstant());
            final long days = duration.toDays();
            return (int) days;
        } else if (LOGICAL_TYPE_TIME_MILLIS.equals(logicalType.getName())) {
            final String format = AvroTypeUtil.determineDataType(fieldSchema).getFormat();
            final Time time = DataTypeUtils.toTime(rawValue, () -> DataTypeUtils.getDateFormat(format), fieldName);
            final Date date = new Date(time.getTime());
            final Duration duration = Duration.between(date.toInstant().truncatedTo(ChronoUnit.DAYS), date.toInstant());
            final long millisSinceMidnight = duration.toMillis();
            return (int) millisSinceMidnight;
        }

        return DataTypeUtils.toInteger(rawValue, fieldName);
    }
    case LONG: {
        final LogicalType logicalType = fieldSchema.getLogicalType();
        if (logicalType == null) {
            return DataTypeUtils.toLong(rawValue, fieldName);
        }

        if (LOGICAL_TYPE_TIME_MICROS.equals(logicalType.getName())) {
            final long longValue = getLongFromTimestamp(rawValue, fieldSchema, fieldName);
            final Date date = new Date(longValue);
            final Duration duration = Duration.between(date.toInstant().truncatedTo(ChronoUnit.DAYS), date.toInstant());
            return duration.toMillis() * 1000L;
        } else if (LOGICAL_TYPE_TIMESTAMP_MILLIS.equals(logicalType.getName())) {
            final String format = AvroTypeUtil.determineDataType(fieldSchema).getFormat();
            Timestamp t = DataTypeUtils.toTimestamp(rawValue, () -> DataTypeUtils.getDateFormat(format), fieldName);
            return getLongFromTimestamp(rawValue, fieldSchema, fieldName);
        } else if (LOGICAL_TYPE_TIMESTAMP_MICROS.equals(logicalType.getName())) {
            return getLongFromTimestamp(rawValue, fieldSchema, fieldName) * 1000L;
        }

        return DataTypeUtils.toLong(rawValue, fieldName);
    }
    case BYTES:
    case FIXED:
        final LogicalType logicalType = fieldSchema.getLogicalType();
        if (logicalType != null && LOGICAL_TYPE_DECIMAL.equals(logicalType.getName())) {
            final LogicalTypes.Decimal decimalType = (LogicalTypes.Decimal) logicalType;
            final BigDecimal rawDecimal;
            if (rawValue instanceof BigDecimal) {
                rawDecimal = (BigDecimal) rawValue;
            } else if (rawValue instanceof Double) {
                rawDecimal = BigDecimal.valueOf((Double) rawValue);
            } else if (rawValue instanceof String) {
                rawDecimal = new BigDecimal((String) rawValue);
            } else if (rawValue instanceof Integer) {
                rawDecimal = new BigDecimal((Integer) rawValue);
            } else if (rawValue instanceof Long) {
                rawDecimal = new BigDecimal((Long) rawValue);
            } else {
                throw new IllegalTypeConversionException("Cannot convert value " + rawValue + " of type "
                        + rawValue.getClass() + " to a logical decimal");
            }
            // If the desired scale differs from this value's scale, coerce it.
            final int desiredScale = decimalType.getScale();
            final BigDecimal decimal = rawDecimal.scale() == desiredScale
                    ? rawDecimal : rawDecimal.setScale(desiredScale, BigDecimal.ROUND_HALF_UP);
            return new Conversions.DecimalConversion().toBytes(decimal, fieldSchema, logicalType);
        }
        if (rawValue instanceof byte[]) {
            return ByteBuffer.wrap((byte[]) rawValue);
        }
        if (rawValue instanceof String) {
            return ByteBuffer.wrap(((String) rawValue).getBytes(charset));
        }
        if (rawValue instanceof Object[]) {
            return AvroTypeUtil.convertByteArray((Object[]) rawValue);
        } else {
            throw new IllegalTypeConversionException("Cannot convert value " + rawValue + " of type "
                    + rawValue.getClass() + " to a ByteBuffer");
        }
    case MAP:
        if (rawValue instanceof Record) {
            final Record recordValue = (Record) rawValue;
            final Map<String, Object> map = new HashMap<>();
            for (final RecordField recordField : recordValue.getSchema().getFields()) {
                final Object v = recordValue.getValue(recordField);
                if (v != null) {
                    map.put(recordField.getFieldName(), v);
                }
            }
            return map;
        } else if (rawValue instanceof Map) {
            final Map<String, Object> objectMap = (Map<String, Object>) rawValue;
            final Map<String, Object> map = new HashMap<>(objectMap.size());
            for (final String s : objectMap.keySet()) {
                final Object converted = convertToAvroObject(objectMap.get(s), fieldSchema.getValueType(),
                        fieldName + "[" + s + "]", charset);
                map.put(s, converted);
            }
            return map;
        } else {
            throw new IllegalTypeConversionException(
                    "Cannot convert value " + rawValue + " of type " + rawValue.getClass() + " to a Map");
        }
    case RECORD:
        final GenericData.Record avroRecord = new GenericData.Record(fieldSchema);
        final Record record = (Record) rawValue;
        for (final RecordField recordField : record.getSchema().getFields()) {
            final Object recordFieldValue = record.getValue(recordField);
            final String recordFieldName = recordField.getFieldName();
            final Field field = fieldSchema.getField(recordFieldName);
            if (field == null) {
                continue;
            }
            final Object converted = convertToAvroObject(recordFieldValue, field.schema(),
                    fieldName + "/" + recordFieldName, charset);
            avroRecord.put(recordFieldName, converted);
        }
        return avroRecord;
    case UNION:
        return convertUnionFieldValue(rawValue, fieldSchema,
                schema -> convertToAvroObject(rawValue, schema, fieldName, charset), fieldName);
    case ARRAY:
        final Object[] objectArray = (Object[]) rawValue;
        final List<Object> list = new ArrayList<>(objectArray.length);
        int i = 0;
        for (final Object o : objectArray) {
            final Object converted = convertToAvroObject(o, fieldSchema.getElementType(),
                    fieldName + "[" + i + "]", charset);
            list.add(converted);
            i++;
        }
        return list;
    case BOOLEAN:
        return DataTypeUtils.toBoolean(rawValue, fieldName);
    case DOUBLE:
        return DataTypeUtils.toDouble(rawValue, fieldName);
    case FLOAT:
        return DataTypeUtils.toFloat(rawValue, fieldName);
    case NULL:
        return null;
    case ENUM:
        return new GenericData.EnumSymbol(fieldSchema, rawValue);
    case STRING:
        return DataTypeUtils.toString(rawValue, (String) null, charset);
    }

    return rawValue;
}