List of usage examples for java.util.concurrent.CompletableFuture.completedFuture
public static <U> CompletableFuture<U> completedFuture(U value)
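completedFuture returns a CompletableFuture that is already completed with the given value, so join() or get() on it returns immediately without blocking. This makes it handy for wrapping an already-known result in an asynchronous API, and for stubbing async calls in tests, which is how most of the examples below use it. A minimal standalone sketch (the class name CompletedFutureDemo is just for illustration):

import java.util.concurrent.CompletableFuture;

public class CompletedFutureDemo {
    public static void main(String[] args) {
        // Wrap an already-known value in an already-completed future.
        CompletableFuture<String> future = CompletableFuture.completedFuture("hello");

        // join() returns immediately because the future is already done.
        System.out.println(future.join());      // prints "hello"
        System.out.println(future.isDone());    // prints "true"

        // A completed future still composes like any other stage.
        future.thenApply(String::toUpperCase)
              .thenAccept(System.out::println); // prints "HELLO"
    }
}

Because the future is complete at construction time, any dependent stages chained onto it typically run synchronously on the calling thread unless an async variant (e.g. thenApplyAsync) is used.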
From source file: com.ikanow.aleph2.data_model.interfaces.shared_services.MockManagementCrudService.java

@Override
public ManagementFuture<Supplier<Object>> storeObject(T new_object, boolean replace_if_present) {
    _mutable_values.add(new_object);
    return FutureUtils.createManagementFuture(CompletableFuture.completedFuture(() -> "id"));
}
From source file: io.pravega.segmentstore.server.host.ZKSegmentContainerManagerTest.java

@Test
public void testClose() throws Exception {
    @Cleanup
    CuratorFramework zkClient = startClient();
    SegmentContainerRegistry containerRegistry = mock(SegmentContainerRegistry.class);
    ContainerHandle containerHandle1 = mock(ContainerHandle.class);
    when(containerHandle1.getContainerId()).thenReturn(1);
    when(containerRegistry.startContainer(eq(1), any()))
            .thenReturn(CompletableFuture.completedFuture(containerHandle1));
    when(containerRegistry.stopContainer(any(), any())).thenReturn(CompletableFuture.completedFuture(null));
    ZKSegmentContainerManager segManager = createContainerManager(containerRegistry, zkClient);
    segManager.initialize();
    segManager.close();
}
From source file: io.pravega.controller.server.eventProcessor.AutoScaleRequestHandler.java

private CompletableFuture<Void> processScaleUp(final AutoScaleEvent request, final ScalingPolicy policy,
        final OperationContext context) {
    log.debug("scale up request received for stream {} segment {}", request.getStream(), request.getSegmentNumber());
    if (policy.getType().equals(ScalingPolicy.Type.FIXED_NUM_SEGMENTS)) {
        return CompletableFuture.completedFuture(null);
    }
    return streamMetadataStore
            .getSegment(request.getScope(), request.getStream(), request.getSegmentNumber(), context, executor)
            .thenComposeAsync(segment -> {
                // do not go above scale factor. Minimum scale factor is 2 though.
                int numOfSplits = Math.min(request.getNumOfSplits(), Math.max(2, policy.getScaleFactor()));
                double delta = (segment.getKeyEnd() - segment.getKeyStart()) / numOfSplits;
                final ArrayList<AbstractMap.SimpleEntry<Double, Double>> simpleEntries = new ArrayList<>();
                for (int i = 0; i < numOfSplits; i++) {
                    simpleEntries.add(new AbstractMap.SimpleEntry<>(segment.getKeyStart() + delta * i,
                            segment.getKeyStart() + (delta * (i + 1))));
                }
                return postScaleRequest(request, Lists.newArrayList(request.getSegmentNumber()), simpleEntries);
            }, executor);
}
From source file: io.pravega.controller.server.eventProcessor.requesthandlers.AutoScaleTask.java

private CompletableFuture<Void> processScaleUp(final AutoScaleEvent request, final ScalingPolicy policy,
        final OperationContext context) {
    log.info("scale up request received for stream {} segment {}", request.getStream(), request.getSegmentNumber());
    if (policy.getScaleType().equals(ScalingPolicy.ScaleType.FIXED_NUM_SEGMENTS)) {
        return CompletableFuture.completedFuture(null);
    }
    return streamMetadataStore
            .getSegment(request.getScope(), request.getStream(), request.getSegmentNumber(), context, executor)
            .thenComposeAsync(segment -> {
                // do not go above scale factor. Minimum scale factor is 2 though.
                int numOfSplits = Math.min(Math.max(2, request.getNumOfSplits()), Math.max(2, policy.getScaleFactor()));
                double delta = (segment.getKeyEnd() - segment.getKeyStart()) / numOfSplits;
                final ArrayList<AbstractMap.SimpleEntry<Double, Double>> simpleEntries = new ArrayList<>();
                for (int i = 0; i < numOfSplits; i++) {
                    simpleEntries.add(new AbstractMap.SimpleEntry<>(segment.getKeyStart() + delta * i,
                            segment.getKeyStart() + (delta * (i + 1))));
                }
                return postScaleRequest(request, Lists.newArrayList(request.getSegmentNumber()), simpleEntries);
            }, executor);
}
From source file: io.symcpe.hendrix.api.dao.TestTemplateManager.java

@Before
public void before() {
    em = emf.createEntityManager();
    when(am.getEM()).thenReturn(em);
    when(am.getRuleTopicName()).thenReturn("ruleTopic");
    when(am.getTemplateTopicName()).thenReturn("templateTopic");
    when(am.getKafkaProducer()).thenReturn(producer);
    when(producer.send(any())).thenReturn(CompletableFuture
            .completedFuture(new RecordMetadata(new TopicPartition("templateTopic", 2), 1, 1)));
}
From source file: com.ikanow.aleph2.aleph2_rest_utils.DataStoreCrudService.java

@Override
public CompletableFuture<Supplier<Object>> storeObject(final FileDescriptor new_object) {
    final String path = output_directory + new_object.file_name();
    _logger.debug("attempting to store object: " + path);
    try {
        FileUtils.writeFile(fileContext, new_object.input_stream(), path);
    } catch (Exception e) {
        return FutureUtils.returnError(e);
    }
    // return file_name, since that is what is used to query/delete by id
    return CompletableFuture.completedFuture(() -> new_object.file_name());
}
From source file: io.jmnarloch.spring.cloud.stream.binder.hermes.HermesClientBinderTest.java

@Test
public void shouldPublishMessageWithError() {
    // given
    reset(hermesSender);
    final HermesResponse response = HermesResponseBuilder.hermesResponse().withHttpStatus(500).build();
    when(hermesSender.send(any(URI.class), any(HermesMessage.class)))
            .thenReturn(CompletableFuture.completedFuture(response));
    DirectChannel output = new DirectChannel();

    // when
    Binding<MessageChannel> binding = binder.bindProducer(OUTPUT_NAME, output,
            new ExtendedProducerProperties<>(new HermesProducerProperties()));

    // then
    output.send(new GenericMessage<>(MESSAGE, json()));
    verify(hermesSender, times(4)).send(any(URI.class), any(HermesMessage.class));
    binding.unbind();
}
From source file: com.ikanow.aleph2.data_model.interfaces.shared_services.MockManagementCrudService.java

@Override
public ManagementFuture<Supplier<Object>> storeObject(T new_object) {
    _mutable_values.add(new_object);
    return FutureUtils.createManagementFuture(CompletableFuture.completedFuture(() -> "id"));
}
From source file: io.pravega.controller.server.ControllerService.java

public CompletableFuture<CreateStreamStatus> createStream(final StreamConfiguration streamConfig,
        final long createTimestamp) {
    Preconditions.checkNotNull(streamConfig, "streamConfig");
    Preconditions.checkArgument(createTimestamp >= 0);
    try {
        NameUtils.validateStreamName(streamConfig.getStreamName());
    } catch (IllegalArgumentException | NullPointerException e) {
        log.warn("Create stream failed due to invalid stream name {}", streamConfig.getStreamName());
        return CompletableFuture.completedFuture(CreateStreamStatus.newBuilder()
                .setStatus(CreateStreamStatus.Status.INVALID_STREAM_NAME).build());
    }
    return streamMetadataTasks
            .createStream(streamConfig.getScope(), streamConfig.getStreamName(), streamConfig, createTimestamp)
            .thenApplyAsync(status -> CreateStreamStatus.newBuilder().setStatus(status).build(), executor);
}
From source file: com.srotya.tau.api.dao.TestTemplateManager.java

@Before
public void before() {
    em = emf.createEntityManager();
    when(am.getEM()).thenReturn(em);
    when(am.getSourcer()).thenReturn(kafkaCommandSourcer);
    kafkaCommandSourcer.setProducer(producer);
    kafkaCommandSourcer.setRuleTopicName("ruleTopic");
    kafkaCommandSourcer.setTemplateTopicName("templateTopic");
    when(producer.send(any())).thenReturn(CompletableFuture
            .completedFuture(new RecordMetadata(new TopicPartition("templateTopic", 2), 1, 1)));
}