Example usage for java.time Duration ofMillis

List of usage examples for java.time Duration ofMillis

Introduction

On this page you can find example usage of java.time.Duration.ofMillis.

Prototype

public static Duration ofMillis(long millis) 

Document

Obtains a Duration representing a number of milliseconds.
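
For quick orientation, here is a minimal, self-contained sketch (the class name is illustrative and not taken from any of the projects below) showing how the millisecond count is split into seconds and nanoseconds:

import java.time.Duration;

public class DurationOfMillisExample {

    public static void main(String[] args) {
        // 1500 ms is stored as 1 second plus 500,000,000 nanoseconds
        Duration timeout = Duration.ofMillis(1500);
        System.out.println(timeout);              // PT1.5S
        System.out.println(timeout.getSeconds()); // 1
        System.out.println(timeout.getNano());    // 500000000
        System.out.println(timeout.toMillis());   // 1500

        // ofMillis(5000) and ofSeconds(5) describe the same length of time
        System.out.println(Duration.ofMillis(5000).equals(Duration.ofSeconds(5))); // true
    }
}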

Usage

From source file:org.springframework.cloud.gateway.test.websocket.WebSocketIntegrationTests.java

@Test
public void echoForHttp() throws Exception {
    int count = 100;
    Flux<String> input = Flux.range(1, count).map(index -> "msg-" + index);
    ReplayProcessor<Object> output = ReplayProcessor.create(count);

    client.execute(getHttpUrl("/echoForHttp"), session -> {
        logger.debug("Starting to send messages");
        return session.send(input.doOnNext(s -> logger.debug("outbound " + s)).map(s -> session.textMessage(s)))
                .thenMany(session.receive().take(count).map(WebSocketMessage::getPayloadAsText))
                .subscribeWith(output).doOnNext(s -> logger.debug("inbound " + s)).then().doOnSuccessOrError(
                        (aVoid, ex) -> logger.debug("Done with " + (ex != null ? ex.getMessage() : "success")));
    }).block(Duration.ofMillis(5000));

    assertThat(output.collectList().block(Duration.ofMillis(5000)))
            .isEqualTo(input.collectList().block(Duration.ofMillis(5000)));
}

From source file:org.springframework.cloud.gateway.test.websocket.WebSocketIntegrationTests.java

@Test
public void subProtocol() throws Exception {
    String protocol = "echo-v1";
    String protocol2 = "echo-v2";
    AtomicReference<HandshakeInfo> infoRef = new AtomicReference<>();
    MonoProcessor<Object> output = MonoProcessor.create();

    client.execute(getUrl("/sub-protocol"), new WebSocketHandler() {
        @Override
        public List<String> getSubProtocols() {
            return Arrays.asList(protocol, protocol2);
        }

        @Override
        public Mono<Void> handle(WebSocketSession session) {
            infoRef.set(session.getHandshakeInfo());
            return session.receive().map(WebSocketMessage::getPayloadAsText).subscribeWith(output).then();
        }
    }).block(Duration.ofMillis(5000));

    HandshakeInfo info = infoRef.get();
    assertThat(info.getHeaders().getFirst("Upgrade")).isEqualToIgnoringCase("websocket");

    assertThat(info.getHeaders().getFirst("Sec-WebSocket-Protocol")).isEqualTo(protocol);
    assertThat(info.getSubProtocol()).as("Wrong protocol accepted").isEqualTo(protocol);
    assertThat(output.block(Duration.ofSeconds(5))).as("Wrong protocol detected on the server side")
            .isEqualTo(protocol);
}

From source file:org.springframework.cloud.gateway.test.websocket.WebSocketIntegrationTests.java

@Test
public void customHeader() throws Exception {
    HttpHeaders headers = new HttpHeaders();
    headers.add("my-header", "my-value");
    MonoProcessor<Object> output = MonoProcessor.create();

    client.execute(getUrl("/custom-header"), headers,
            session -> session.receive().map(WebSocketMessage::getPayloadAsText).subscribeWith(output).then())
            .block(Duration.ofMillis(5000));

    assertThat(output.block(Duration.ofMillis(5000))).isEqualTo("my-header:my-value");
}

From source file:org.springframework.cloud.gateway.test.websocket.WebSocketIntegrationTests.java

@Test
public void sessionClosing() throws Exception {
    this.client.execute(getUrl("/close"), session -> {
        logger.debug("Starting..");
        return session.receive().doOnNext(s -> logger.debug("inbound " + s)).then().doFinally(signalType -> {
            logger.debug("Completed with: " + signalType);
        });
    }).block(Duration.ofMillis(5000));
}

From source file:org.springframework.data.redis.connection.lettuce.LettucePoolingClientConfigurationUnitTests.java

@Test // DATAREDIS-667
public void shouldCreateEmptyConfiguration() {

    LettucePoolingClientConfiguration configuration = LettucePoolingClientConfiguration.defaultConfiguration();

    assertThat(configuration.getPoolConfig()).isNotNull();
    assertThat(configuration.isUseSsl()).isFalse();
    assertThat(configuration.isVerifyPeer()).isTrue();
    assertThat(configuration.isStartTls()).isFalse();
    assertThat(configuration.getClientOptions()).isEmpty();
    assertThat(configuration.getClientResources()).isEmpty();
    assertThat(configuration.getCommandTimeout()).isEqualTo(Duration.ofSeconds(60));
    assertThat(configuration.getShutdownTimeout()).isEqualTo(Duration.ofMillis(100));
    assertThat(configuration.getShutdownQuietPeriod()).isEqualTo(Duration.ofMillis(100));
}

From source file:org.springframework.kafka.listener.TransactionalContainerTests.java

@SuppressWarnings("unchecked")
@Test
public void testRollbackRecord() throws Exception {
    logger.info("Start testRollbackRecord");
    Map<String, Object> props = KafkaTestUtils.consumerProps("txTest1", "false", embeddedKafka);
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "group");
    props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
    DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
    ContainerProperties containerProps = new ContainerProperties(topic1, topic2);
    containerProps.setGroupId("group");
    containerProps.setPollTimeout(10_000);

    Map<String, Object> senderProps = KafkaTestUtils.producerProps(embeddedKafka);
    senderProps.put(ProducerConfig.RETRIES_CONFIG, 1);
    DefaultKafkaProducerFactory<Integer, String> pf = new DefaultKafkaProducerFactory<>(senderProps);
    pf.setTransactionIdPrefix("rr.");

    final KafkaTemplate<Integer, String> template = new KafkaTemplate<>(pf);
    final AtomicBoolean failed = new AtomicBoolean();
    final CountDownLatch latch = new CountDownLatch(3);
    final AtomicReference<String> transactionalId = new AtomicReference<>();
    containerProps.setMessageListener((MessageListener<Integer, String>) message -> {
        latch.countDown();
        if (failed.compareAndSet(false, true)) {
            throw new RuntimeException("fail");
        }
        /*
         * Send a message to topic2 and wait for it so we don't stop the container too soon.
         */
        if (message.topic().equals(topic1)) {
            template.send(topic2, "bar");
            template.flush();
            transactionalId.set(KafkaTestUtils.getPropertyValue(
                    ProducerFactoryUtils.getTransactionalResourceHolder(pf).getProducer(),
                    "delegate.transactionManager.transactionalId", String.class));
        }
    });

    @SuppressWarnings({ "rawtypes" })
    KafkaTransactionManager tm = new KafkaTransactionManager(pf);
    containerProps.setTransactionManager(tm);
    KafkaMessageListenerContainer<Integer, String> container = new KafkaMessageListenerContainer<>(cf,
            containerProps);
    container.setBeanName("testRollbackRecord");
    container.start();

    template.setDefaultTopic(topic1);
    template.executeInTransaction(t -> {
        template.sendDefault(0, 0, "foo");
        return null;
    });
    assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
    container.stop();
    Consumer<Integer, String> consumer = cf.createConsumer();
    final CountDownLatch subsLatch = new CountDownLatch(1);
    consumer.subscribe(Arrays.asList(topic1), new ConsumerRebalanceListener() {

        @Override
        public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
            // empty
        }

        @Override
        public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
            subsLatch.countDown();
        }

    });
    ConsumerRecords<Integer, String> records = null;
    int n = 0;
    while (subsLatch.getCount() > 0 && n++ < 600) {
        records = consumer.poll(Duration.ofMillis(100));
    }
    assertThat(subsLatch.await(1, TimeUnit.MILLISECONDS)).isTrue();
    assertThat(records.count()).isEqualTo(0);
    // depending on timing, the position might include the offset representing the commit in the log
    assertThat(consumer.position(new TopicPartition(topic1, 0))).isGreaterThanOrEqualTo(1L);
    assertThat(transactionalId.get()).startsWith("rr.group.txTopic");
    assertThat(KafkaTestUtils.getPropertyValue(pf, "consumerProducers", Map.class)).isEmpty();
    logger.info("Stop testRollbackRecord");
    pf.destroy();
    consumer.close();
}

From source file:org.springframework.kafka.test.EmbeddedKafkaBroker.java

/**
 * Subscribe a consumer to one or more of the embedded topics.
 * @param consumer the consumer.
 * @param topics the topics.
 */
public void consumeFromEmbeddedTopics(Consumer<?, ?> consumer, String... topics) {
    HashSet<String> diff = new HashSet<>(Arrays.asList(topics));
    diff.removeAll(new HashSet<>(this.topics));
    assertThat(this.topics).as("topic(s):'" + diff + "' are not in embedded topic list")
            .containsAll(new HashSet<>(Arrays.asList(topics)));
    final AtomicBoolean assigned = new AtomicBoolean();
    consumer.subscribe(Arrays.asList(topics), new ConsumerRebalanceListener() {

        @Override
        public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
        }

        @Override
        public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
            assigned.set(true);
            if (logger.isDebugEnabled()) {
                logger.debug("partitions assigned: " + partitions);
            }
        }

    });
    ConsumerRecords<?, ?> records = null;
    int n = 0;
    while (!assigned.get() && n++ < 600) { // NOSONAR magic #
        records = consumer.poll(Duration.ofMillis(100)); // force assignment NOSONAR magic #
    }
    if (records != null && records.count() > 0) {
        final ConsumerRecords<?, ?> theRecords = records;
        if (logger.isDebugEnabled()) {
            logger.debug("Records received on initial poll for assignment; re-seeking to beginning; " + records
                    .partitions().stream().flatMap(p -> theRecords.records(p).stream())
                    // map to same format as send metadata toString()
                    .map(r -> r.topic() + "-" + r.partition() + "@" + r.offset()).collect(Collectors.toList()));
        }
        consumer.seekToBeginning(records.partitions());
    }
    assertThat(assigned.get()).as("Failed to be assigned partitions from the embedded topics").isTrue();
    logger.debug("Subscription Initiated");
}

From source file:org.springframework.kafka.test.utils.KafkaTestUtils.java

/**
 * Poll the consumer for records.
 * @param consumer the consumer.
 * @param timeout max time in milliseconds to wait for records; converted to a {@link Duration}
 * and forwarded to {@link Consumer#poll(Duration)}.
 * @param <K> the key type.
 * @param <V> the value type.
 * @return the records.
 * @since 2.0
 */
public static <K, V> ConsumerRecords<K, V> getRecords(Consumer<K, V> consumer, long timeout) {
    logger.debug("Polling...");
    ConsumerRecords<K, V> received = consumer.poll(Duration.ofMillis(timeout));
    if (logger.isDebugEnabled()) {
        logger.debug("Received: " + received.count() + ", "
                + received.partitions().stream().flatMap(p -> received.records(p).stream())
                        // map to same format as send metadata toString()
                        .map(r -> r.topic() + "-" + r.partition() + "@" + r.offset())
                        .collect(Collectors.toList()));
    }
    assertThat(received).as("null received from consumer.poll()").isNotNull();
    return received;
}

From source file:org.springframework.messaging.tcp.reactor.ReactorNettyTcpClient.java

private <T> Function<Flux<T>, Publisher<?>> reconnectFunction(ReconnectStrategy reconnectStrategy) {
    return flux -> flux.scan(1, (count, element) -> count + 1)
            .flatMap(attempt -> Optional.ofNullable(reconnectStrategy.getTimeToNextAttempt(attempt))
                    .map(time -> Mono.delay(Duration.ofMillis(time), this.scheduler)).orElse(Mono.empty()));
}

From source file:org.springframework.web.reactive.socket.WebSocketIntegrationTests.java

@Test
public void echo() throws Exception {
    int count = 100;
    Flux<String> input = Flux.range(1, count).map(index -> "msg-" + index);
    ReplayProcessor<Object> output = ReplayProcessor.create(count);

    this.client.execute(getUrl("/echo"), session -> {
        logger.debug("Starting to send messages");
        return session.send(input.doOnNext(s -> logger.debug("outbound " + s)).map(session::textMessage))
                .thenMany(session.receive().take(count).map(WebSocketMessage::getPayloadAsText))
                .subscribeWith(output).doOnNext(s -> logger.debug("inbound " + s)).then().doOnSuccessOrError(
                        (aVoid, ex) -> logger.debug("Done with " + (ex != null ? ex.getMessage() : "success")));
    }).block(Duration.ofMillis(5000));

    assertEquals(input.collectList().block(Duration.ofMillis(5000)),
            output.collectList().block(Duration.ofMillis(5000)));
}