List of usage examples for java.util.concurrent TimeUnit HOURS
TimeUnit HOURS
To view the source code for java.util.concurrent TimeUnit.HOURS, click the Source Link below.
From source file:dk.dma.ais.store.FileExportRest.java
private void printDownloadStatus() { // Determine how far left we have to go // We know current time long currentTime = System.currentTimeMillis(); // How long have we been running long milisecondsRunning = currentTime - timeStart; // Calculate total % done: // Total AIS time to process in miliseconds long aisTimeTotalOriginal = intervalVar.getEndMillis() - intervalStartTime; // Stretch of processed AIS time: long processedAisTimeOriginal = lastFlushTimestamp - intervalStartTime; double percentDoneOriginal = (double) processedAisTimeOriginal / (double) aisTimeTotalOriginal * 100; // Calculate the estimated time // Total AIS time to process in miliseconds long aisTimeTotal = intervalVar.getEndMillis() - lastLoadedTimestamp; // Stretch of processed AIS time: long processedAisTime = lastFlushTimestamp - lastLoadedTimestamp; double percentDoneNow = (double) processedAisTime / (double) aisTimeTotal * 100; double goToPercent = (100 - percentDoneNow); // How many % do we calculate pr. milisecond double percentPrMilisecond = percentDoneNow / ((double) milisecondsRunning); double timeLeft = goToPercent / percentPrMilisecond; long millis = (long) timeLeft; String timeLeftStr = String.format("%02d:%02d:%02d", TimeUnit.MILLISECONDS.toHours(millis), TimeUnit.MILLISECONDS.toMinutes(millis) - TimeUnit.HOURS.toMinutes(TimeUnit.MILLISECONDS.toHours(millis)), TimeUnit.MILLISECONDS.toSeconds(millis) - TimeUnit.MINUTES.toSeconds(TimeUnit.MILLISECONDS.toMinutes(millis))); String percentDoneStr = decimalFormatter.format(percentDoneOriginal) + "%"; // String percentDoneStr = ((double) ((int) (percentDoneOriginal * 100))) / 100 + "%"; // String part1DownloadMessage = "Downloading AIS Data " + percentDoneStr; // String part2DownloadMessage = " Estimated Time Left: " + timeLeftStr; // if (part1DownloadMessage.length() < ) printAisStore("Downloading AIS Data " + percentDoneStr + " Estimated Time Left: " + timeLeftStr + "\r"); // printAisStore(); // int seconds = (int) (timeLeft / 1000) % 60; 
// int minutes = (int) ((timeLeft / (1000 * 60)) % 60); // int hours = (int) ((timeLeft / (1000 * 60 * 60)) % 24); // System.out.println(hours + ":" + minutes + ":" + seconds); // System.out.println("Miliseconds running" + milisecondsRunning); ////from w ww . j a va 2 s . co m // // AIS Time left of request // long aisTimeLeft = intervalVar.getEndMillis() - lastFlushTimestamp; // // // How long have we spent parsing up to now // long aisTimeParsed = processedAisTime / milisecondsRunning; // // System.out.println("We have in " + milisecondsRunning + " miliseconds processed " + processedAisTime + " AIS Interval"); // System.out.println("AIS Time Left: " + aisTimeLeft); // System.out.println("aisTimeParsed " + aisTimeParsed); // counterCurrent.get(); // // If we are resuming our % will be further along // We know our count since start // We know how long we have been running }
From source file:com.persinity.ndt.datamutator.DataMutator.java
private String formatRunningTime() { final long elapsedMs = runningTime.elapsed(TimeUnit.MILLISECONDS); final String res; if (runningTime.elapsed(TimeUnit.DAYS) > 0) { res = DurationFormatUtils.formatDuration(elapsedMs, "dd:HH:mm:ss"); } else if (runningTime.elapsed(TimeUnit.HOURS) > 0) { res = DurationFormatUtils.formatDuration(elapsedMs, "HH:mm:ss"); } else {/* ww w.j av a2 s .c o m*/ res = DurationFormatUtils.formatDuration(elapsedMs, "mm:ss"); } return res; }
From source file:com.brightcove.player.samples.offlineplayback.VideoListAdapter.java
/** * Converts the given duration into a time span string. * * @param duration elapsed time as number of milliseconds. * @return the formatted time span string. */// www . j ava 2s .c o m @NonNull public static String millisecondsToString(long duration) { final TimeUnit scale = TimeUnit.MILLISECONDS; StringBuilder builder = new StringBuilder(); long days = scale.toDays(duration); duration -= TimeUnit.DAYS.toMillis(days); if (days > 0) { builder.append(days); builder.append(days > 1 ? " days " : " day "); } long hours = scale.toHours(duration); duration -= TimeUnit.HOURS.toMillis(hours); if (hours > 0) { builder.append(String.format("%02d:", hours)); } long minutes = scale.toMinutes(duration); duration -= TimeUnit.MINUTES.toMillis(minutes); long seconds = scale.toSeconds(duration); builder.append(String.format("%02d:%02d", minutes, seconds)); return builder.toString(); }
From source file:eu.tango.energymodeller.energypredictor.AbstractEnergyPredictor.java
@Override /**/*from w w w .j ava 2 s. c o m*/ * This provides a prediction of how much energy is to be used by a application, * over the next hour. * * @param app The application to be deployed * @param applications The giving a workload on the host machine * @param host The host that the applications will be running on * @return The prediction of the energy to be used. */ public EnergyUsagePrediction getApplicationPredictedEnergy(ApplicationOnHost app, Collection<ApplicationOnHost> applications, Host host) { TimePeriod duration = new TimePeriod(new GregorianCalendar(), TimeUnit.HOURS.toSeconds(1)); return getApplicationPredictedEnergy(app, applications, host, duration); }
From source file:org.apache.pulsar.compaction.CompactionTest.java
@Test
public void testBatchMessageIdsDontChange() throws Exception {
    String topic = "persistent://my-property/use/my-ns/my-topic1";

    // subscribe before sending anything, so that we get all messages
    pulsarClient.newConsumer().topic(topic).subscriptionName("sub1").readCompacted(true).subscribe().close();

    // Publish three keyed messages that the broker packs into a single batch
    // (batchingMaxMessages(3), with a publish delay long enough that the
    // batch only flushes when full / on the final synchronous send()).
    try (Producer<byte[]> producer = pulsarClient.newProducer().topic(topic).maxPendingMessages(3)
            .enableBatching(true).batchingMaxMessages(3).batchingMaxPublishDelay(1, TimeUnit.HOURS)
            .messageRoutingMode(MessageRoutingMode.SinglePartition).create()) {
        producer.newMessage().key("key1").value("my-message-1".getBytes()).sendAsync();
        producer.newMessage().key("key2").value("my-message-2".getBytes()).sendAsync();
        producer.newMessage().key("key2").value("my-message-3".getBytes()).send();
    }

    // Read messages before compaction to get ids
    List<Message<byte[]>> messages = new ArrayList<>();
    try (Consumer<byte[]> consumer = pulsarClient.newConsumer().topic(topic).subscriptionName("sub1")
            .readCompacted(true).subscribe()) {
        messages.add(consumer.receive());
        messages.add(consumer.receive());
        messages.add(consumer.receive());
    }

    // Ensure all messages are in same batch: same ledger and entry id for
    // all three means they share one batch entry on the topic.
    Assert.assertEquals(((BatchMessageIdImpl) messages.get(0).getMessageId()).getLedgerId(),
            ((BatchMessageIdImpl) messages.get(1).getMessageId()).getLedgerId());
    Assert.assertEquals(((BatchMessageIdImpl) messages.get(0).getMessageId()).getLedgerId(),
            ((BatchMessageIdImpl) messages.get(2).getMessageId()).getLedgerId());
    Assert.assertEquals(((BatchMessageIdImpl) messages.get(0).getMessageId()).getEntryId(),
            ((BatchMessageIdImpl) messages.get(1).getMessageId()).getEntryId());
    Assert.assertEquals(((BatchMessageIdImpl) messages.get(0).getMessageId()).getEntryId(),
            ((BatchMessageIdImpl) messages.get(2).getMessageId()).getEntryId());

    // compact the topic
    Compactor compactor = new TwoPhaseCompactor(conf, pulsarClient, bk, compactionScheduler);
    compactor.compact(topic).get();

    // Check that messages after compaction have same ids: key1 retains its
    // only message; key2 keeps the latest ("my-message-3"), and each
    // surviving message keeps its pre-compaction message id.
    try (Consumer<byte[]> consumer = pulsarClient.newConsumer().topic(topic).subscriptionName("sub1")
            .readCompacted(true).subscribe()) {
        Message<byte[]> message1 = consumer.receive();
        Assert.assertEquals(message1.getKey(), "key1");
        Assert.assertEquals(new String(message1.getData()), "my-message-1");
        Assert.assertEquals(message1.getMessageId(), messages.get(0).getMessageId());

        Message<byte[]> message2 = consumer.receive();
        Assert.assertEquals(message2.getKey(), "key2");
        Assert.assertEquals(new String(message2.getData()), "my-message-3");
        Assert.assertEquals(message2.getMessageId(), messages.get(2).getMessageId());
    }
}
From source file:org.apache.carbondata.processing.store.writer.AbstractFactDataWriter.java
/** * This method will close the executor service which is used for copying carbon * data files to carbon store path//from w ww . j av a 2s . co m * * @throws CarbonDataWriterException */ protected void closeExecutorService() throws CarbonDataWriterException { executorService.shutdown(); try { executorService.awaitTermination(2, TimeUnit.HOURS); } catch (InterruptedException e) { throw new CarbonDataWriterException(e.getMessage()); } for (int i = 0; i < executorServiceSubmitList.size(); i++) { try { executorServiceSubmitList.get(i).get(); } catch (InterruptedException e) { throw new CarbonDataWriterException(e.getMessage()); } catch (ExecutionException e) { throw new CarbonDataWriterException(e.getMessage()); } } }
From source file:com.ibm.og.client.ApacheClient.java
private Runnable getShutdownRunnable(final SettableFuture<Boolean> future, final boolean immediate) { return new Runnable() { @Override//from w w w . ja v a2s .c o m public void run() { if (immediate) { closeSockets(); } shutdownClient(); future.set(true); } private void closeSockets() { try { _logger.info("Attempting to close client connection pool"); ApacheClient.this.client.close(); _logger.info("Client connection pool is closed"); } catch (final IOException e) { _logger.error("Error closing client connection pool", e); } } private void shutdownClient() { _logger.info("Issuing client shutdown"); ApacheClient.this.executorService.shutdown(); while (!ApacheClient.this.executorService.isTerminated()) { awaitShutdown(1, TimeUnit.HOURS); } _logger.info("Client is shutdown"); _logger.info("Number of requests aborted at shutdown [{}]", ApacheClient.this.abortedRequestsAtShutdown.get()); } private void awaitShutdown(final long timeout, final TimeUnit unit) { try { _logger.info("Awaiting client executor service termination for {} {}", timeout, unit); final boolean result = ApacheClient.this.executorService.awaitTermination(timeout, unit); _logger.info("Client executor service termination result [{}]", result ? "success" : "failure"); } catch (final InterruptedException e) { _logger.error("Interrupted while waiting for client executor service termination", e); } } }; }
From source file:org.apache.nifi.web.api.AccessResource.java
private long validateTokenExpiration(long proposedTokenExpiration, String identity) { final long maxExpiration = TimeUnit.MILLISECONDS.convert(12, TimeUnit.HOURS); final long minExpiration = TimeUnit.MILLISECONDS.convert(1, TimeUnit.MINUTES); if (proposedTokenExpiration > maxExpiration) { logger.warn(String.format("Max token expiration exceeded. Setting expiration to %s from %s for %s", maxExpiration, proposedTokenExpiration, identity)); proposedTokenExpiration = maxExpiration; } else if (proposedTokenExpiration < minExpiration) { logger.warn(String.format("Min token expiration not met. Setting expiration to %s from %s for %s", minExpiration, proposedTokenExpiration, identity)); proposedTokenExpiration = minExpiration; }/*from w w w . ja v a2s. c o m*/ return proposedTokenExpiration; }
From source file:de.yaacc.upnp.server.contentdirectory.YaaccContentDirectory.java
public String formatDuration(String millisStr) { String res = ""; long duration = Long.valueOf(millisStr); long hours = TimeUnit.MILLISECONDS.toHours(duration) - TimeUnit.DAYS.toHours(TimeUnit.MILLISECONDS.toDays(duration)); long minutes = TimeUnit.MILLISECONDS.toMinutes(duration) - TimeUnit.HOURS.toMinutes(TimeUnit.MILLISECONDS.toHours(duration)); long seconds = TimeUnit.MILLISECONDS.toSeconds(duration) - TimeUnit.MINUTES.toSeconds(TimeUnit.MILLISECONDS.toMinutes(duration)); res = String.format(Locale.US, "%02d:%02d:%02d", hours, minutes, seconds); return res;//from ww w .j a v a2s. c o m // Date d = new Date(Long.parseLong(millis)); // SimpleDateFormat df = new SimpleDateFormat("HH:mm:ss"); // return df.format(d); }
From source file:org.apache.pulsar.compaction.CompactionTest.java
@Test public void testWholeBatchCompactedOut() throws Exception { String topic = "persistent://my-property/use/my-ns/my-topic1"; // subscribe before sending anything, so that we get all messages pulsarClient.newConsumer().topic(topic).subscriptionName("sub1").readCompacted(true).subscribe().close(); try (Producer<byte[]> producerNormal = pulsarClient.newProducer().topic(topic).enableBatching(false) .messageRoutingMode(MessageRoutingMode.SinglePartition).create(); Producer<byte[]> producerBatch = pulsarClient.newProducer().topic(topic).maxPendingMessages(3) .enableBatching(true).batchingMaxMessages(3).batchingMaxPublishDelay(1, TimeUnit.HOURS) .messageRoutingMode(MessageRoutingMode.SinglePartition).create()) { producerBatch.newMessage().key("key1").value("my-message-1".getBytes()).sendAsync(); producerBatch.newMessage().key("key1").value("my-message-2".getBytes()).sendAsync(); producerBatch.newMessage().key("key1").value("my-message-3".getBytes()).sendAsync(); producerNormal.newMessage().key("key1").value("my-message-4".getBytes()).send(); }/*from w ww . j av a 2s . c o m*/ // compact the topic Compactor compactor = new TwoPhaseCompactor(conf, pulsarClient, bk, compactionScheduler); compactor.compact(topic).get(); try (Consumer<byte[]> consumer = pulsarClient.newConsumer().topic(topic).subscriptionName("sub1") .readCompacted(true).subscribe()) { Message<byte[]> message = consumer.receive(); Assert.assertEquals(message.getKey(), "key1"); Assert.assertEquals(new String(message.getData()), "my-message-4"); } }