List of usage examples for the java.util.concurrent.CompletableFuture constructor
public CompletableFuture()
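The no-argument constructor creates an incomplete future that a producer completes by hand, usually from inside an asynchronous callback, as every example below does. A minimal self-contained sketch of the pattern (the class and values here are illustrative, not taken from any of the sources below):

import java.util.concurrent.CompletableFuture;

public class ManualCompletionExample {
    public static void main(String[] args) {
        // An incomplete future; nothing is running yet.
        CompletableFuture<String> future = new CompletableFuture<>();

        // Complete it from another thread, as an asynchronous callback would.
        new Thread(() -> future.complete("done")).start();

        // join() blocks until the future is completed, then returns "done".
        System.out.println(future.join());

        // On the failure path a producer calls future.completeExceptionally(cause)
        // instead, which makes join() throw a CompletionException wrapping the cause.
    }
}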
From source file: org.apache.pulsar.compaction.TwoPhaseCompactor.java

private CompletableFuture<Void> deleteLedger(BookKeeper bk, LedgerHandle lh) {
    CompletableFuture<Void> bkf = new CompletableFuture<>();
    bk.asyncDeleteLedger(lh.getId(), (rc, ctx) -> {
        if (rc != BKException.Code.OK) {
            bkf.completeExceptionally(BKException.create(rc));
        } else {
            bkf.complete(null);
        }
    }, null);
    return bkf;
}
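This is the standard bridge from a callback-based API to CompletableFuture: the future is created up front, returned to the caller immediately, and completed later from inside the BookKeeper callback. Since the future carries no payload, success is signaled with complete(null) on a CompletableFuture<Void>.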
From source file: org.openhab.binding.mqtt.generic.internal.generic.ChannelState.java

/**
 * Publishes a value on MQTT. A command topic needs to be set in the configuration.
 *
 * @param command The command to send
 * @return A future that completes normally if the publishing worked, and exceptionally otherwise.
 */
public CompletableFuture<@Nullable Void> publishValue(Command command) {
    cachedValue.update(command);
    String mqttCommandValue = cachedValue.getMQTTpublishValue();
    final MqttBrokerConnection connection = this.connection;
    if (!readOnly && connection != null) {
        // Formatter: Applied before the channel state value is published to the MQTT broker.
        if (config.formatBeforePublish.length() > 0) {
            try (Formatter formatter = new Formatter()) {
                Formatter format = formatter.format(config.formatBeforePublish, mqttCommandValue);
                mqttCommandValue = format.toString();
            } catch (IllegalFormatException e) {
                logger.debug("Format pattern incorrect for {}", channelUID, e);
            }
        }
        // Outgoing transformations
        for (ChannelStateTransformation t : transformationsOut) {
            mqttCommandValue = t.processValue(mqttCommandValue);
        }
        // Send retained messages if this is a stateful channel
        return connection.publish(config.commandTopic, mqttCommandValue.getBytes(), 1, config.retained)
                .thenRun(() -> {
                });
    } else {
        CompletableFuture<@Nullable Void> f = new CompletableFuture<>();
        f.completeExceptionally(new IllegalStateException("No connection or readOnly channel!"));
        return f;
    }
}
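Note the else branch: instead of throwing, the method returns a future that is already completed exceptionally, giving callers a uniform asynchronous error channel. On Java 9 and later the same two lines can be collapsed into the static factory CompletableFuture.failedFuture(throwable); the construct above is the Java 8 equivalent.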
From source file: io.pravega.controller.store.stream.InMemoryStream.java

@Override
CompletableFuture<Void> updateHistoryTable(Data<Integer> updated) {
    Preconditions.checkNotNull(updated);
    Preconditions.checkNotNull(updated.getData());
    CompletableFuture<Void> result = new CompletableFuture<>();
    synchronized (lock) {
        if (historyTable == null) {
            result.completeExceptionally(StoreException.create(StoreException.Type.DATA_NOT_FOUND,
                    "Historytable for stream: " + getName()));
        } else {
            if (historyTable.getVersion().equals(updated.getVersion())) {
                historyTable = new Data<>(Arrays.copyOf(updated.getData(), updated.getData().length),
                        updated.getVersion() + 1);
                result.complete(null);
            } else {
                result.completeExceptionally(StoreException.create(StoreException.Type.WRITE_CONFLICT,
                        "Historytable for stream: " + getName()));
            }
        }
    }
    return result;
}
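Here the future is completed synchronously inside the lock, implementing optimistic concurrency control: the update is applied only if the caller's version matches the stored one, and a version mismatch surfaces as a WRITE_CONFLICT failure on the future rather than as a thrown exception.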
From source file: org.apache.pulsar.client.impl.PulsarClientImpl.java

@Override
public CompletableFuture<Reader> createReaderAsync(String topic, MessageId startMessageId,
        ReaderConfiguration conf) {
    if (state.get() != State.Open) {
        return FutureUtil
                .failedFuture(new PulsarClientException.AlreadyClosedException("Client already closed"));
    }
    if (!DestinationName.isValid(topic)) {
        return FutureUtil
                .failedFuture(new PulsarClientException.InvalidTopicNameException("Invalid topic name"));
    }
    if (startMessageId == null) {
        return FutureUtil.failedFuture(
                new PulsarClientException.InvalidConfigurationException("Invalid startMessageId"));
    }
    if (conf == null) {
        return FutureUtil.failedFuture(
                new PulsarClientException.InvalidConfigurationException("Consumer configuration undefined"));
    }

    CompletableFuture<Reader> readerFuture = new CompletableFuture<>();

    getPartitionedTopicMetadata(topic).thenAccept(metadata -> {
        if (log.isDebugEnabled()) {
            log.debug("[{}] Received topic metadata. partitions: {}", topic, metadata.partitions);
        }

        if (metadata.partitions > 1) {
            readerFuture.completeExceptionally(
                    new PulsarClientException("Topic reader cannot be created on a partitioned topic"));
            return;
        }

        CompletableFuture<Consumer> consumerSubscribedFuture = new CompletableFuture<>();
        // gets the next single threaded executor from the list of executors
        ExecutorService listenerThread = externalExecutorProvider.getExecutor();
        ReaderImpl reader = new ReaderImpl(PulsarClientImpl.this, topic, startMessageId, conf, listenerThread,
                consumerSubscribedFuture);

        synchronized (consumers) {
            consumers.put(reader.getConsumer(), Boolean.TRUE);
        }

        consumerSubscribedFuture.thenRun(() -> {
            readerFuture.complete(reader);
        }).exceptionally(ex -> {
            log.warn("[{}] Failed to create topic reader", topic, ex);
            readerFuture.completeExceptionally(ex);
            return null;
        });
    }).exceptionally(ex -> {
        log.warn("[{}] Failed to get partitioned topic metadata", topic, ex);
        readerFuture.completeExceptionally(ex);
        return null;
    });

    return readerFuture;
}
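Two patterns are worth noting here. First, the early validation checks each return an already-failed future (via FutureUtil.failedFuture), so the method never throws synchronously. Second, the manually constructed readerFuture joins two asynchronous stages whose results cannot be chained directly: the metadata lookup and the consumer subscription, either of which may complete it exceptionally.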
From source file: com.ikanow.aleph2.storm.harvest_technology.StormHarvestTechnologyModule.java

@Override
public CompletableFuture<BasicMessageBean> onNewSource(DataBucketBean new_bucket, IHarvestContext context,
        boolean enabled) {
    logger.info("received new source request, enabled: " + enabled);
    CompletableFuture<BasicMessageBean> future = new CompletableFuture<BasicMessageBean>();
    if (enabled) {
        //build out a topology for these config options
        String job_name = getJobName(new_bucket);
        StormTopology topology = null;
        try {
            topology = StormHarvestTechnologyTopologyUtil.createTopology(new_bucket.harvest_configs(), job_name,
                    context, new_bucket);
        } catch (Exception e) {
            //set failure in completable future
            future.complete(new BasicMessageBean(new Date(), false, null, "onNewSource", null,
                    ErrorUtils.getLongForm("{0}", e), null));
            return future;
        }
        try {
            //step1 create a megajar from:
            //context.getHarvestLibraries(Optional.of(new_bucket));
            //and whatever jars i need to read raw data, parse that data, output to context.stream();
            //step2 send this jar + topology to storm so it starts
            logger.debug("creating jar to submit");
            final String input_jar_location = System.getProperty("java.io.tmpdir") + File.separator + job_name
                    + ".jar";
            List<String> jars_to_merge = new ArrayList<String>();
            jars_to_merge.addAll(context.getHarvestContextLibraries(Optional.empty()));
            if (isOnlyHadoopDep(jars_to_merge)) {
                // special case: no aleph2 libs found, this is almost certainly because this is being run from eclipse...
                final GlobalPropertiesBean globals = ModuleUtils.getGlobalProperties();
                logger.warn("WARNING: no library files found, probably because this is running from an IDE - instead taking all JARs from: "
                        + (globals.local_root_dir() + "/lib/"));
                try {
                    //... and LiveInjecter doesn't work on classes ... as a backup just copy everything from "<LOCAL_ALEPH2_HOME>/lib" into there
                    jars_to_merge.addAll(FileUtils
                            .listFiles(new File(globals.local_root_dir() + "/lib/"), new String[] { "jar" }, false)
                            .stream().map(File::toString).filter(file -> {
                                return !(file.contains("aleph2_storm_dependencies")
                                        || file.contains("aleph2_analytical_services_storm"));
                            }).collect(Collectors.toList()));
                } catch (Exception e) {
                    throw new RuntimeException("In eclipse/IDE mode, directory not found: "
                            + (globals.local_root_dir() + "/lib/"));
                }
            }
            //filter the harvester out of the harvest libraries
            Map<String, String> harvest_libraries = context.getHarvestLibraries(Optional.of(new_bucket)).get();
            //kick the harvest library out of our jar (it contains storm.jar which we can't send to storm)
            List<String> harvest_library_paths = harvest_libraries.keySet().stream()
                    .filter(name -> !name.contains(new_bucket.harvest_technology_name_or_id()))
                    .map(name -> harvest_libraries.get(name)).collect(Collectors.toList());
            jars_to_merge.addAll(harvest_library_paths);
            JarBuilderUtil.mergeJars(jars_to_merge, input_jar_location, dirs_to_ignore);
            StormControllerUtil.startJob(storm_controller, job_name, input_jar_location, topology);
            //verify job was assigned some executors
            TopologyInfo info = StormControllerUtil.getJobStats(storm_controller, job_name);
            if (info.get_executors_size() == 0) {
                //no executors were available for this job, stop the job, throw an error
                StormControllerUtil.stopJob(storm_controller, job_name);
                future.complete(new BasicMessageBean(new Date(), false, null, "onNewSource", null,
                        "No executors were assigned to this job, typically this is because too many jobs are currently running, kill some other jobs and resubmit.",
                        null));
                return future;
            }
        } catch (Exception e) {
            //set failure in completable future
            future.complete(new BasicMessageBean(new Date(), false, null, "onNewSource", null,
                    ErrorUtils.getLongForm("{0}", e), null));
            return future;
        }
    }
    //TODO return something useful
    future.complete(new BasicMessageBean(new Date(), true, null, "onNewSource", null, null, null));
    return future;
}
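Unlike the earlier examples, failures here never complete the future exceptionally: errors are encoded as a BasicMessageBean whose success flag is false, so the future always completes normally and the caller inspects the bean. This error-as-value style keeps business failures out of the exception channel, at the cost of callers having to check the flag.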
From source file: org.apache.pulsar.compaction.TwoPhaseCompactor.java

private CompletableFuture<Void> closeLedger(LedgerHandle lh) {
    CompletableFuture<Void> bkf = new CompletableFuture<>();
    lh.asyncClose((rc, ledger, ctx) -> {
        if (rc != BKException.Code.OK) {
            bkf.completeExceptionally(BKException.create(rc));
        } else {
            bkf.complete(null);
        }
    }, null);
    return bkf;
}
From source file: co.runrightfast.vertx.orientdb.verticle.OrientDBVerticleTest.java

@Test
public void testEventLogRepository() throws Exception {
    final Vertx vertx = vertxService.getVertx();
    final RunRightFastVerticleId verticleManagerId = EventLogRepository.VERTICLE_ID;

    final CompletableFuture<GetEventCount.Response> getEventCountFuture = new CompletableFuture<>();
    final long timeout = 60000L;
    // Because verticles are deployed asynchronously, the EventLogRepository verticle may not be deployed yet.
    // Its message codec is only registered while the verticle is starting, so the codec may also still be missing.
    while (true) {
        try {
            vertx.eventBus().send(EventBusAddress.eventBusAddress(verticleManagerId, GetEventCount.class),
                    GetEventCount.Request.getDefaultInstance(), new DeliveryOptions().setSendTimeout(timeout),
                    responseHandler(getEventCountFuture, GetEventCount.Response.class));
            break;
        } catch (final IllegalArgumentException e) {
            if (e.getMessage().contains("No message codec for type")) {
                log.log(WARNING, "Waiting for EventLogRepository ... ", e);
                Thread.sleep(5000L);
            } else {
                throw e;
            }
        }
    }
    final GetEventCount.Response getEventCountResponse = getEventCountFuture.get(timeout, TimeUnit.MILLISECONDS);
    assertThat(getEventCountResponse.getCount(), is(0L));

    final CompletableFuture<CreateEvent.Response> createEventFuture = new CompletableFuture<>();
    vertx.eventBus().send(EventBusAddress.eventBusAddress(verticleManagerId, CreateEvent.class),
            CreateEvent.Request.newBuilder().setEvent("testEventLogRepository").build(),
            new DeliveryOptions().setSendTimeout(timeout),
            responseHandler(createEventFuture, CreateEvent.Response.class));
    final CreateEvent.Response createEventResponse = createEventFuture.get(timeout, TimeUnit.MILLISECONDS);
    log.info(String.format("record id = %d::%d", createEventResponse.getId().getClusterId(),
            createEventResponse.getId().getPosition()));

    final CompletableFuture<GetEventCount.Response> getEventCountFuture2 = new CompletableFuture<>();
    vertx.eventBus().send(EventBusAddress.eventBusAddress(verticleManagerId, GetEventCount.class),
            GetEventCount.Request.getDefaultInstance(), new DeliveryOptions().setSendTimeout(timeout),
            responseHandler(getEventCountFuture2, GetEventCount.Response.class));
    final GetEventCount.Response getEventCountResponse2 = getEventCountFuture2.get(timeout,
            TimeUnit.MILLISECONDS);
    assertThat(getEventCountResponse2.getCount(), is(1L));
}
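The responseHandler helper used above, which turns a Vert.x reply callback into a CompletableFuture completion, is not shown in this listing. A minimal sketch of what such a bridge could look like (the signature and body are assumptions inferred from the call sites, not the project's actual code):

// Hypothetical sketch only; the real helper in this project is not part of this listing.
// Requires io.vertx.core.Handler, io.vertx.core.AsyncResult, io.vertx.core.eventbus.Message.
static <T> Handler<AsyncResult<Message<T>>> responseHandler(final CompletableFuture<T> future,
        final Class<T> responseClass) {
    // responseClass would typically be used to validate or cast the reply payload.
    return result -> {
        if (result.succeeded()) {
            future.complete(result.result().body());      // hand the reply payload to the future
        } else {
            future.completeExceptionally(result.cause()); // propagate the event-bus failure
        }
    };
}

This lets the blocking test code wait on the asynchronous event-bus reply with future.get(timeout, unit), as done above.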
From source file: co.runrightfast.vertx.demo.testHarness.jmx.DemoMXBeanImpl.java

@Override
public long eventLogRecordCount() {
    if (this.getEventCountMessageProducer == null) {
        getEventCountMessageProducer = new ProtobufMessageProducer<>(vertx.eventBus(),
                EventBusAddress.eventBusAddress(EventLogRepository.VERTICLE_ID, GetEventCount.class),
                new ProtobufMessageCodec<>(GetEventCount.Request.getDefaultInstance()), metricRegistry);
    }
    try {
        final CompletableFuture<GetEventCount.Response> getEventCountFuture = new CompletableFuture<>();
        getEventCountMessageProducer.send(GetEventCount.Request.getDefaultInstance(),
                responseHandler(getEventCountFuture, GetEventCount.Response.class));
        final GetEventCount.Response response = getEventCountFuture.get(2, TimeUnit.SECONDS);
        return response.getCount();
    } catch (final InterruptedException | ExecutionException | TimeoutException ex) {
        log.logp(SEVERE, getClass().getName(), "getEventLogRecordCount", "failed", ex);
        throw new RuntimeException("Failed to get event log record count: " + ex.getMessage());
    }
}
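The same bridge pattern appears here behind a synchronous facade: the JMX method blocks on get(2, TimeUnit.SECONDS) and translates InterruptedException, ExecutionException, and TimeoutException into an unchecked exception, which is what a JMX client expects. One caveat with this style: catching InterruptedException without calling Thread.currentThread().interrupt() swallows the thread's interrupt status.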
From source file: org.apache.pulsar.compaction.TwoPhaseCompactor.java

private CompletableFuture<Void> addToCompactedLedger(LedgerHandle lh, RawMessage m) {
    CompletableFuture<Void> bkf = new CompletableFuture<>();
    ByteBuf serialized = m.serialize();
    lh.asyncAddEntry(serialized, (rc, ledger, eid, ctx) -> {
        if (rc != BKException.Code.OK) {
            bkf.completeExceptionally(BKException.create(rc));
        } else {
            bkf.complete(null);
        }
    }, null);
    return bkf;
}
From source file: org.apache.hadoop.hbase.client.AsyncScanSingleRegionRpcRetryingCaller.java

public AsyncScanSingleRegionRpcRetryingCaller(HashedWheelTimer retryTimer, AsyncConnectionImpl conn, Scan scan,
        long scannerId, ScanResultCache resultCache, RawScanResultConsumer consumer, Interface stub,
        HRegionLocation loc, long scannerLeaseTimeoutPeriodNs, long pauseNs, int maxAttempts, long scanTimeoutNs,
        long rpcTimeoutNs, int startLogErrorsCnt) {
    this.retryTimer = retryTimer;
    this.scan = scan;
    this.scannerId = scannerId;
    this.resultCache = resultCache;
    this.consumer = consumer;
    this.stub = stub;
    this.loc = loc;
    this.scannerLeaseTimeoutPeriodNs = scannerLeaseTimeoutPeriodNs;
    this.pauseNs = pauseNs;
    this.maxAttempts = maxAttempts;
    this.scanTimeoutNs = scanTimeoutNs;
    this.rpcTimeoutNs = rpcTimeoutNs;
    this.startLogErrorsCnt = startLogErrorsCnt;
    if (scan.isReversed()) {
        completeWhenNoMoreResultsInRegion = this::completeReversedWhenNoMoreResultsInRegion;
    } else {
        completeWhenNoMoreResultsInRegion = this::completeWhenNoMoreResultsInRegion;
    }
    this.future = new CompletableFuture<>();
    this.controller = conn.rpcControllerFactory.newController();
    this.exceptions = new ArrayList<>();
}
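Here the CompletableFuture is created eagerly in the constructor and stored as a field; the retrying caller completes it much later, once the scan of this region finishes or permanently fails. Creating the future up front lets the object hand out a stable handle to its eventual result before any RPC has been issued.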