Example usage for java.time Duration toMillis

List of usage examples for java.time Duration toMillis

Introduction

On this page you can find usage examples for java.time Duration toMillis.

Prototype

public long toMillis() 

Document

Converts this duration to the total length in milliseconds.
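
For orientation, here is a minimal, self-contained sketch of the method; the class name and values are illustrative and not taken from any of the projects below:

import java.time.Duration;

public class DurationToMillisExample {
    public static void main(String[] args) {
        // Two and a half seconds expressed as a Duration
        Duration duration = Duration.ofSeconds(2).plusMillis(500);

        // toMillis() returns the total length in milliseconds (2500 here)
        long millis = duration.toMillis();
        System.out.println("Duration in milliseconds: " + millis);
    }
}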

Usage

From source file:org.apache.samza.runtime.LocalApplicationRunner.java
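
Here the caller-supplied timeout is converted to milliseconds and used as the bound for awaiting the shutdown latch; a timeout below one millisecond waits indefinitely.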

@Override
public boolean waitForFinish(Duration timeout) {
    long timeoutInMs = timeout.toMillis();
    boolean finished = true;

    try {
        if (timeoutInMs < 1) {
            shutdownLatch.await();
        } else {
            finished = shutdownLatch.await(timeoutInMs, TimeUnit.MILLISECONDS);

            if (!finished) {
                LOG.warn("Timed out waiting for application to finish.");
            }
        }
    } catch (Exception e) {
        LOG.error("Error waiting for application to finish", e);
        throw new SamzaException(e);
    }

    return finished;
}

From source file:org.apache.samza.test.framework.TestRunner.java
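
Here toMillis() turns the consumption timeout into a millisecond deadline that bounds the polling loop over the in-memory output stream partitions.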

/**
 * Gets the contents of the output stream represented by {@code outputDescriptor} after {@link TestRunner#run(Duration)}
 * has completed.
 *
 * @param outputDescriptor describes the stream to be consumed
 * @param timeout timeout for consumption of the stream
 * @param <StreamMessageType> type of message
 *
 * @return a map whose key is {@code partitionId} and value is messages in partition
 * @throws SamzaException Thrown when a poll is incomplete
 */
public static <StreamMessageType> Map<Integer, List<StreamMessageType>> consumeStream(
        InMemoryOutputDescriptor outputDescriptor, Duration timeout) throws SamzaException {
    Preconditions.checkNotNull(outputDescriptor);
    String streamId = outputDescriptor.getStreamId();
    String systemName = outputDescriptor.getSystemName();
    Set<SystemStreamPartition> ssps = new HashSet<>();
    Set<String> streamIds = new HashSet<>();
    streamIds.add(streamId);
    SystemFactory factory = new InMemorySystemFactory();
    Config config = new MapConfig(outputDescriptor.toConfig(),
            outputDescriptor.getSystemDescriptor().toConfig());
    Map<String, SystemStreamMetadata> metadata = factory.getAdmin(systemName, config)
            .getSystemStreamMetadata(streamIds);
    SystemConsumer consumer = factory.getConsumer(systemName, config, null);
    String name = (String) outputDescriptor.getPhysicalName().orElse(streamId);
    metadata.get(name).getSystemStreamPartitionMetadata().keySet().forEach(partition -> {
        SystemStreamPartition temp = new SystemStreamPartition(systemName, streamId, partition);
        ssps.add(temp);
        consumer.register(temp, "0");
    });

    long t = System.currentTimeMillis();
    Map<SystemStreamPartition, List<IncomingMessageEnvelope>> output = new HashMap<>();
    HashSet<SystemStreamPartition> didNotReachEndOfStream = new HashSet<>(ssps);
    while (System.currentTimeMillis() < t + timeout.toMillis()) {
        Map<SystemStreamPartition, List<IncomingMessageEnvelope>> currentState = null;
        try {
            currentState = consumer.poll(ssps, 10);
        } catch (InterruptedException e) {
            throw new SamzaException("Timed out while consuming stream \n" + e.getMessage());
        }
        for (Map.Entry<SystemStreamPartition, List<IncomingMessageEnvelope>> entry : currentState.entrySet()) {
            SystemStreamPartition ssp = entry.getKey();
            output.computeIfAbsent(ssp, k -> new LinkedList<IncomingMessageEnvelope>());
            List<IncomingMessageEnvelope> currentBuffer = entry.getValue();
            Integer totalMessagesToFetch = Integer.valueOf(metadata.get(outputDescriptor.getStreamId())
                    .getSystemStreamPartitionMetadata().get(ssp.getPartition()).getNewestOffset());
            if (output.get(ssp).size() + currentBuffer.size() == totalMessagesToFetch) {
                didNotReachEndOfStream.remove(entry.getKey());
                ssps.remove(entry.getKey());
            }
            output.get(ssp).addAll(currentBuffer);
        }
        if (didNotReachEndOfStream.isEmpty()) {
            break;
        }
    }

    if (!didNotReachEndOfStream.isEmpty()) {
        throw new IllegalStateException("Could not poll for all system stream partitions");
    }

    return output.entrySet().stream().collect(
            Collectors.toMap(entry -> entry.getKey().getPartition().getPartitionId(), entry -> entry.getValue()
                    .stream().map(e -> (StreamMessageType) e.getMessage()).collect(Collectors.toList())));
}

From source file:org.codice.alliance.distribution.sdk.video.stream.mpegts.MpegTsUdpClient.java
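
Here toMillis() converts the video's duration into milliseconds, from which the client derives the delay to insert between batches of UDP packets.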

public static void main(String[] args) {

    String[] arguments = args[0].split(",");

    if (arguments.length < 1) {
        logErrorMessage("Unable to start stream : no arguments specified.", true);
        return;
    }

    String videoFilePath = arguments[0];
    if (StringUtils.isBlank(videoFilePath)) {
        logErrorMessage("Unable to start stream : no video file path specified.", true);
        return;
    }

    String ip;
    int port;

    if (arguments.length == 1) {
        ip = DEFAULT_IP;
        port = DEFAULT_PORT;
        LOGGER.warn("No IP or port provided.  Using defaults : {}:{}", DEFAULT_IP, DEFAULT_PORT);
    } else if (arguments.length == 2) {
        ip = arguments[1];
        port = DEFAULT_PORT;
        LOGGER.warn("No port provided.  Using default : {}", DEFAULT_PORT);
    } else {
        ip = arguments[1];
        try {
            port = Integer.parseInt(arguments[2]);
        } catch (NumberFormatException e) {
            LOGGER.warn("Unable to parse specified port : {}.  Using Default : {}", arguments[2], DEFAULT_PORT);
            port = DEFAULT_PORT;
        }
    }

    LOGGER.info("Video file path : {}", videoFilePath);

    LOGGER.info("Streaming address : {}:{}", ip, port);

    final AtomicLong count = new AtomicLong(0);

    EventLoopGroup eventLoopGroup = new NioEventLoopGroup();
    try {

        Bootstrap bootstrap = new Bootstrap();

        bootstrap.group(eventLoopGroup).channel(NioDatagramChannel.class)
                .option(ChannelOption.SO_BROADCAST, true)
                .handler(new SimpleChannelInboundHandler<DatagramPacket>() {
                    @Override
                    protected void channelRead0(ChannelHandlerContext channelHandlerContext,
                            DatagramPacket datagramPacket) throws Exception {
                        LOGGER.info("Reading datagram from channel");
                    }

                    @Override
                    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
                        logErrorMessage(String.format("Exception occured handling datagram packet.  %s", cause),
                                false);
                        ctx.close();
                    }
                });

        Channel ch = bootstrap.bind(0).sync().channel();

        File videoFile = new File(videoFilePath);

        long bytesSent = 0;

        long tsPacketCount = videoFile.length() / PACKET_SIZE;

        Duration videoDuration = getVideoDuration(videoFilePath);
        if (videoDuration == null) {
            return;
        }

        long tsDurationMillis = videoDuration.toMillis();

        LOGGER.info("Video Duration : {}", tsDurationMillis);

        double delayPerPacket = (double) tsDurationMillis / (double) tsPacketCount;

        long startTime = System.currentTimeMillis();

        int packetsSent = 0;

        try (final InputStream fis = new BufferedInputStream(new FileInputStream(videoFile))) {

            byte[] buffer = new byte[PACKET_SIZE];
            int c;
            while ((c = fis.read(buffer)) != -1) {
                bytesSent += c;

                ChannelFuture cf = ch.writeAndFlush(
                        new DatagramPacket(Unpooled.copiedBuffer(buffer), new InetSocketAddress(ip, port)));

                cf.await();

                packetsSent++;

                if (packetsSent % 100 == 0) {
                    Thread.sleep((long) (delayPerPacket * 100));
                }
                if (packetsSent % 10000 == 0) {
                    LOGGER.info("Packet sent : {}", packetsSent);
                }
            }
        }

        long endTime = System.currentTimeMillis();

        LOGGER.info("Time Elapsed: {}", endTime - startTime);

        if (!ch.closeFuture().await(100)) {
            logErrorMessage("Channel time out", false);
        }

        LOGGER.info("Bytes sent : {} ", bytesSent);

    } catch (InterruptedException | IOException e) {
        logErrorMessage(String.format("Unable to generate stream : %s", e), false);
    } finally {
        // Shut down the event loop to terminate all threads.
        eventLoopGroup.shutdownGracefully();
    }

    LOGGER.info("count = " + count.get());
}

From source file:org.jbb.frontend.impl.format.DurationFormatter.java
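
Here toMillis() supplies the total millisecond length to Apache Commons' DurationFormatUtils.formatDuration for rendering with the configured pattern.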

@Override
public String print(Duration duration, Locale locale) {
    return DurationFormatUtils.formatDuration(duration.toMillis(), frontendProperties.durationFormatPattern());
}

From source file:org.springframework.data.redis.connection.lettuce.LettuceClusterConnection.java
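
Here the connection timeout is converted to milliseconds before being passed to the superclass constructor.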

/**
 * Creates new {@link LettuceClusterConnection} using {@link LettuceConnectionProvider} running commands across the
 * cluster via given {@link ClusterCommandExecutor}.
 *
 * @param connectionProvider must not be {@literal null}.
 * @param executor must not be {@literal null}.
 * @param timeout must not be {@literal null}.
 * @since 2.0
 */
public LettuceClusterConnection(LettuceConnectionProvider connectionProvider, ClusterCommandExecutor executor,
        Duration timeout) {

    super(null, connectionProvider, timeout.toMillis(), 0);

    Assert.notNull(executor, "ClusterCommandExecutor must not be null.");
    Assert.isTrue(connectionProvider instanceof ClusterConnectionProvider,
            "LettuceConnectionProvider must be a ClusterConnectionProvider.");

    this.clusterClient = getClient();
    this.topologyProvider = new LettuceClusterTopologyProvider(this.clusterClient);
    this.clusterCommandExecutor = executor;
    this.disposeClusterCommandExecutorOnClose = false;
}

From source file:org.springframework.data.redis.connection.lettuce.LettuceClusterConnection.java
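
As in the previous constructor, toMillis() converts the timeout Duration for the superclass call, which takes the value in milliseconds.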

/**
 * Creates new {@link LettuceClusterConnection} given a shared {@link StatefulRedisClusterConnection} and
 * {@link LettuceConnectionProvider} running commands across the cluster via given {@link ClusterCommandExecutor}.
 *
 * @param sharedConnection may be {@literal null} if no shared connection used.
 * @param connectionProvider must not be {@literal null}.
 * @param clusterClient must not be {@literal null}.
 * @param executor must not be {@literal null}.
 * @param timeout must not be {@literal null}.
 * @since 2.1
 */
LettuceClusterConnection(@Nullable StatefulRedisClusterConnection<byte[], byte[]> sharedConnection,
        LettuceConnectionProvider connectionProvider, RedisClusterClient clusterClient,
        ClusterCommandExecutor executor, Duration timeout) {

    super(sharedConnection, connectionProvider, timeout.toMillis(), 0);

    Assert.notNull(executor, "ClusterCommandExecutor must not be null.");
    Assert.notNull(clusterClient, "RedisClusterClient must not be null.");

    this.clusterClient = clusterClient;
    this.topologyProvider = new LettuceClusterTopologyProvider(clusterClient);
    this.clusterCommandExecutor = executor;
    this.disposeClusterCommandExecutorOnClose = false;
}

From source file:se.sawano.java.text.PerformanceIT.java
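
Here each sort run's elapsed Duration is converted with toMillis() and accumulated into a running total.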

private long sortWith(final List<String> list, final Comparator<CharSequence> comparator) {
    long totalNew = 0;
    for (int i = 0; i < 500; ++i) {
        Collections.sort(list); // reset
        final Duration duration = whenSorting(list).using(comparator);
        totalNew += duration.toMillis();
    }
    return totalNew;
}