Usage examples for java.util.concurrent.CompletableFuture.complete
public boolean complete(T value)
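The complete method atomically transitions the future to a completed state with the given value; it returns true if this call caused the transition, and false if the future was already completed (normally, exceptionally, or via cancellation). A minimal sketch, not drawn from the sources below (class and variable names are illustrative), showing one thread completing a future that another thread is waiting on:

import java.util.concurrent.CompletableFuture;

public class CompleteDemo {
    public static void main(String[] args) throws Exception {
        CompletableFuture<String> future = new CompletableFuture<>();

        // Producer thread: computes a value and completes the future manually.
        new Thread(() -> {
            boolean won = future.complete("result");  // true: this call set the value
            future.complete("ignored");               // false: already completed, value unchanged
            System.out.println("this call completed the future: " + won);
        }).start();

        // Consumer: blocks until some thread calls complete(...) or completeExceptionally(...).
        System.out.println(future.get()); // prints "result"
    }
}

The examples that follow all use this same manual-completion style: a CompletableFuture is created up front and returned immediately, and asynchronous callbacks later settle it with complete or completeExceptionally.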
From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java
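This HBase admin method shows the collect-then-complete pattern: per-server clearBlockCache calls feed a shared CacheEvictionStatsAggregator, and the returned future is completed with the aggregated sum only once CompletableFuture.allOf reports that every RPC has finished.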
@Override
public CompletableFuture<CacheEvictionStats> clearBlockCache(TableName tableName) {
  CompletableFuture<CacheEvictionStats> future = new CompletableFuture<>();
  addListener(getTableHRegionLocations(tableName), (locations, err) -> {
    if (err != null) {
      future.completeExceptionally(err);
      return;
    }
    Map<ServerName, List<RegionInfo>> regionInfoByServerName = locations.stream()
      .filter(l -> l.getRegion() != null)
      .filter(l -> !l.getRegion().isOffline())
      .filter(l -> l.getServerName() != null)
      .collect(Collectors.groupingBy(l -> l.getServerName(),
        Collectors.mapping(l -> l.getRegion(), Collectors.toList())));
    List<CompletableFuture<CacheEvictionStats>> futures = new ArrayList<>();
    CacheEvictionStatsAggregator aggregator = new CacheEvictionStatsAggregator();
    for (Map.Entry<ServerName, List<RegionInfo>> entry : regionInfoByServerName.entrySet()) {
      futures.add(clearBlockCache(entry.getKey(), entry.getValue()).whenComplete((stats, err2) -> {
        if (err2 != null) {
          future.completeExceptionally(unwrapCompletionException(err2));
        } else {
          aggregator.append(stats);
        }
      }));
    }
    addListener(CompletableFuture.allOf(futures.toArray(new CompletableFuture[futures.size()])),
      (ret, err3) -> {
        if (err3 != null) {
          future.completeExceptionally(unwrapCompletionException(err3));
        } else {
          future.complete(aggregator.sum());
        }
      });
  });
  return future;
}
From source file:org.apache.bookkeeper.mledger.offload.jcloud.impl.BlobStoreManagedLedgerOffloader.java
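Here the offload work runs on a scheduler thread: promise.complete(null) is reached only after both the data blocks and the index block have been uploaded, while any failure completes the promise exceptionally after best-effort cleanup of the multipart upload or the data blob.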
@Override
public CompletableFuture<Void> offload(ReadHandle readHandle, UUID uuid,
                                       Map<String, String> extraMetadata) {
    CompletableFuture<Void> promise = new CompletableFuture<>();
    scheduler.chooseThread(readHandle.getId()).submit(() -> {
        if (readHandle.getLength() == 0 || !readHandle.isClosed()
                || readHandle.getLastAddConfirmed() < 0) {
            promise.completeExceptionally(
                    new IllegalArgumentException("An empty or open ledger should never be offloaded"));
            return;
        }
        OffloadIndexBlockBuilder indexBuilder = OffloadIndexBlockBuilder.create()
                .withLedgerMetadata(readHandle.getLedgerMetadata())
                .withDataBlockHeaderLength(BlockAwareSegmentInputStreamImpl.getHeaderSize());
        String dataBlockKey = dataBlockOffloadKey(readHandle.getId(), uuid);
        String indexBlockKey = indexBlockOffloadKey(readHandle.getId(), uuid);

        MultipartUpload mpu = null;
        List<MultipartPart> parts = Lists.newArrayList();

        // init multi part upload for data block.
        try {
            BlobBuilder blobBuilder = writeBlobStore.blobBuilder(dataBlockKey);
            addVersionInfo(blobBuilder, userMetadata);
            Blob blob = blobBuilder.build();
            mpu = writeBlobStore.initiateMultipartUpload(writeBucket, blob.getMetadata(), new PutOptions());
        } catch (Throwable t) {
            promise.completeExceptionally(t);
            return;
        }

        long dataObjectLength = 0;
        // start multi part upload for data block.
        try {
            long startEntry = 0;
            int partId = 1;
            long entryBytesWritten = 0;
            while (startEntry <= readHandle.getLastAddConfirmed()) {
                int blockSize = BlockAwareSegmentInputStreamImpl.calculateBlockSize(maxBlockSize,
                        readHandle, startEntry, entryBytesWritten);
                try (BlockAwareSegmentInputStream blockStream =
                        new BlockAwareSegmentInputStreamImpl(readHandle, startEntry, blockSize)) {
                    Payload partPayload = Payloads.newInputStreamPayload(blockStream);
                    partPayload.getContentMetadata().setContentLength((long) blockSize);
                    partPayload.getContentMetadata().setContentType("application/octet-stream");
                    parts.add(writeBlobStore.uploadMultipartPart(mpu, partId, partPayload));
                    log.debug("UploadMultipartPart. container: {}, blobName: {}, partId: {}, mpu: {}",
                            writeBucket, dataBlockKey, partId, mpu.id());

                    indexBuilder.addBlock(startEntry, partId, blockSize);

                    if (blockStream.getEndEntryId() != -1) {
                        startEntry = blockStream.getEndEntryId() + 1;
                    } else {
                        // could not read entry from ledger.
                        break;
                    }
                    entryBytesWritten += blockStream.getBlockEntryBytesCount();
                    partId++;
                }
                dataObjectLength += blockSize;
            }

            writeBlobStore.completeMultipartUpload(mpu, parts);
            mpu = null;
        } catch (Throwable t) {
            try {
                if (mpu != null) {
                    writeBlobStore.abortMultipartUpload(mpu);
                }
            } catch (Throwable throwable) {
                log.error("Failed abortMultipartUpload in bucket - {} with key - {}, uploadId - {}.",
                        writeBucket, dataBlockKey, mpu.id(), throwable);
            }
            promise.completeExceptionally(t);
            return;
        }

        // upload index block
        try (OffloadIndexBlock index = indexBuilder.withDataObjectLength(dataObjectLength).build();
             OffloadIndexBlock.IndexInputStream indexStream = index.toStream()) {
            // write the index block
            BlobBuilder blobBuilder = writeBlobStore.blobBuilder(indexBlockKey);
            addVersionInfo(blobBuilder, userMetadata);
            Payload indexPayload = Payloads.newInputStreamPayload(indexStream);
            indexPayload.getContentMetadata().setContentLength((long) indexStream.getStreamSize());
            indexPayload.getContentMetadata().setContentType("application/octet-stream");

            Blob blob = blobBuilder
                    .payload(indexPayload)
                    .contentLength((long) indexStream.getStreamSize())
                    .build();
            writeBlobStore.putBlob(writeBucket, blob);
            promise.complete(null);
        } catch (Throwable t) {
            try {
                writeBlobStore.removeBlob(writeBucket, dataBlockKey);
            } catch (Throwable throwable) {
                log.error("Failed deleteObject in bucket - {} with key - {}.",
                        writeBucket, dataBlockKey, throwable);
            }
            promise.completeExceptionally(t);
            return;
        }
    });
    return promise;
}
From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java
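This example drives complete from a polling TimerTask: the task repeatedly checks isProcedureFinished and calls future.complete(null) once the procedure is done, or fails the future when the expected timeout elapses.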
@Override
public CompletableFuture<Void> execProcedure(String signature, String instance,
    Map<String, String> props) {
  CompletableFuture<Void> future = new CompletableFuture<>();
  ProcedureDescription procDesc = ProtobufUtil.buildProcedureDescription(signature, instance, props);
  addListener(this.<Long> newMasterCaller()
    .action((controller, stub) -> this.<ExecProcedureRequest, ExecProcedureResponse, Long> call(
      controller, stub, ExecProcedureRequest.newBuilder().setProcedure(procDesc).build(),
      (s, c, req, done) -> s.execProcedure(c, req, done), resp -> resp.getExpectedTimeout()))
    .call(), (expectedTimeout, err) -> {
      if (err != null) {
        future.completeExceptionally(err);
        return;
      }
      TimerTask pollingTask = new TimerTask() {
        int tries = 0;
        long startTime = EnvironmentEdgeManager.currentTime();
        long endTime = startTime + expectedTimeout;
        long maxPauseTime = expectedTimeout / maxAttempts;

        @Override
        public void run(Timeout timeout) throws Exception {
          if (EnvironmentEdgeManager.currentTime() < endTime) {
            addListener(isProcedureFinished(signature, instance, props), (done, err2) -> {
              if (err2 != null) {
                future.completeExceptionally(err2);
                return;
              }
              if (done) {
                future.complete(null);
              } else {
                // retry again after pauseTime.
                long pauseTime =
                  ConnectionUtils.getPauseTime(TimeUnit.NANOSECONDS.toMillis(pauseNs), ++tries);
                pauseTime = Math.min(pauseTime, maxPauseTime);
                AsyncConnectionImpl.RETRY_TIMER.newTimeout(this, pauseTime, TimeUnit.MICROSECONDS);
              }
            });
          } else {
            future.completeExceptionally(new IOException("Procedure '" + signature + " : " + instance
              + "' wasn't completed in expectedTime:" + expectedTimeout + " ms"));
          }
        }
      };
      // Queue the polling task into RETRY_TIMER to poll procedure state asynchronously.
      AsyncConnectionImpl.RETRY_TIMER.newTimeout(pollingTask, 1, TimeUnit.MILLISECONDS);
    });
  return future;
}
From source file:org.jaqpot.core.service.client.jpdi.JPDIClientImpl.java
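An Apache HttpAsyncClient FutureCallback bridges the HTTP response to a CompletableFuture: a 2xx response ends in futureDataset.complete(...) with the merged prediction dataset, while error statuses are mapped to matching JAX-RS exceptions via completeExceptionally.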
@Override
public Future<Dataset> predict(Dataset inputDataset, Model model, MetaInfo datasetMeta, String taskId) {
    CompletableFuture<Dataset> futureDataset = new CompletableFuture<>();

    Dataset dataset = DatasetFactory.copy(inputDataset);
    Dataset tempWithDependentFeatures = DatasetFactory.copy(dataset,
            new HashSet<>(model.getDependentFeatures()));

    dataset.getDataEntry().parallelStream().forEach(dataEntry -> {
        dataEntry.getValues().keySet().retainAll(model.getIndependentFeatures());
    });

    PredictionRequest predictionRequest = new PredictionRequest();
    predictionRequest.setDataset(dataset);
    predictionRequest.setRawModel(model.getActualModel());
    predictionRequest.setAdditionalInfo(model.getAdditionalInfo());

    final HttpPost request = new HttpPost(model.getAlgorithm().getPredictionService());
    request.addHeader("Accept", "application/json");
    request.addHeader("Content-Type", "application/json");

    PipedOutputStream out = new PipedOutputStream();
    PipedInputStream in;
    try {
        in = new PipedInputStream(out);
    } catch (IOException ex) {
        futureDataset.completeExceptionally(ex);
        return futureDataset;
    }
    request.setEntity(new InputStreamEntity(in, ContentType.APPLICATION_JSON));

    Future futureResponse = client.execute(request, new FutureCallback<HttpResponse>() {

        @Override
        public void completed(final HttpResponse response) {
            futureMap.remove(taskId);
            int status = response.getStatusLine().getStatusCode();
            try {
                InputStream responseStream = response.getEntity().getContent();
                switch (status) {
                case 200:
                case 201:
                    try {
                        PredictionResponse predictionResponse = serializer.parse(responseStream,
                                PredictionResponse.class);
                        List<LinkedHashMap<String, Object>> predictions = predictionResponse.getPredictions();
                        if (dataset.getDataEntry().isEmpty()) {
                            DatasetFactory.addEmptyRows(dataset, predictions.size());
                        }
                        List<Feature> features = featureHandler
                                .findBySource("algorithm/" + model.getAlgorithm().getId());
                        IntStream.range(0, dataset.getDataEntry().size())
                                // .parallel()
                                .forEach(i -> {
                                    Map<String, Object> row = predictions.get(i);
                                    DataEntry dataEntry = dataset.getDataEntry().get(i);
                                    if (model.getAlgorithm().getOntologicalClasses().contains("ot:Scaling")
                                            || model.getAlgorithm().getOntologicalClasses()
                                                    .contains("ot:Transformation")) {
                                        dataEntry.getValues().clear();
                                        dataset.getFeatures().clear();
                                    }
                                    row.entrySet().stream().forEach(entry -> {
                                        // Feature feature = featureHandler.findByTitleAndSource(entry.getKey(),
                                        //         "algorithm/" + model.getAlgorithm().getId());
                                        Feature feature = features.stream()
                                                .filter(f -> f.getMeta().getTitles().contains(entry.getKey()))
                                                .findFirst().orElse(null);
                                        if (feature == null) {
                                            return;
                                        }
                                        dataEntry.getValues().put(baseURI + "feature/" + feature.getId(),
                                                entry.getValue());
                                        FeatureInfo featInfo = new FeatureInfo(
                                                baseURI + "feature/" + feature.getId(),
                                                feature.getMeta().getTitles().stream().findFirst().get());
                                        featInfo.setCategory(Dataset.DescriptorCategory.PREDICTED);
                                        dataset.getFeatures().add(featInfo);
                                    });
                                });
                        dataset.setId(randomStringGenerator.nextString(20));
                        dataset.setTotalRows(dataset.getDataEntry().size());
                        dataset.setMeta(datasetMeta);
                        futureDataset.complete(DatasetFactory.mergeColumns(dataset, tempWithDependentFeatures));
                    } catch (Exception ex) {
                        futureDataset.completeExceptionally(ex);
                    }
                    break;
                case 400:
                    String message = new BufferedReader(new InputStreamReader(responseStream)).lines()
                            .collect(Collectors.joining("\n"));
                    futureDataset.completeExceptionally(new BadRequestException(message));
                    break;
                case 404:
                    message = new BufferedReader(new InputStreamReader(responseStream)).lines()
                            .collect(Collectors.joining("\n"));
                    futureDataset.completeExceptionally(new NotFoundException(message));
                    break;
                case 500:
                    message = new BufferedReader(new InputStreamReader(responseStream)).lines()
                            .collect(Collectors.joining("\n"));
                    futureDataset.completeExceptionally(new InternalServerErrorException(message));
                    break;
                default:
                    message = new BufferedReader(new InputStreamReader(responseStream)).lines()
                            .collect(Collectors.joining("\n"));
                    futureDataset.completeExceptionally(new InternalServerErrorException(message));
                }
            } catch (IOException | UnsupportedOperationException ex) {
                futureDataset.completeExceptionally(ex);
            }
        }

        @Override
        public void failed(final Exception ex) {
            futureMap.remove(taskId);
            futureDataset.completeExceptionally(new InternalServerErrorException(ex));
        }

        @Override
        public void cancelled() {
            futureMap.remove(taskId);
            futureDataset.cancel(true);
        }
    });

    serializer.write(predictionRequest, out);
    try {
        out.close();
    } catch (IOException ex) {
        futureDataset.completeExceptionally(ex);
    }
    futureMap.put(taskId, futureResponse);
    return futureDataset;
}
From source file:org.jaqpot.core.service.client.jpdi.JPDIClientImpl.java
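The same callback-to-future bridge as the previous example, this time for training: futureModel.complete(model) fires only after the response has been parsed and the prediction features have been created or looked up.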
@Override
public Future<Model> train(Dataset dataset, Algorithm algorithm, Map<String, Object> parameters,
        String predictionFeature, MetaInfo modelMeta, String taskId) {
    CompletableFuture<Model> futureModel = new CompletableFuture<>();

    TrainingRequest trainingRequest = new TrainingRequest();
    trainingRequest.setDataset(dataset);
    trainingRequest.setParameters(parameters);
    trainingRequest.setPredictionFeature(predictionFeature);
    // String trainingRequestString = serializer.write(trainingRequest);

    final HttpPost request = new HttpPost(algorithm.getTrainingService());

    PipedOutputStream out = new PipedOutputStream();
    PipedInputStream in;
    try {
        in = new PipedInputStream(out);
    } catch (IOException ex) {
        futureModel.completeExceptionally(ex);
        return futureModel;
    }
    InputStreamEntity entity = new InputStreamEntity(in, ContentType.APPLICATION_JSON);
    entity.setChunked(true);

    request.setEntity(entity);
    request.addHeader("Accept", "application/json");

    Future futureResponse = client.execute(request, new FutureCallback<HttpResponse>() {

        @Override
        public void completed(final HttpResponse response) {
            futureMap.remove(taskId);
            int status = response.getStatusLine().getStatusCode();
            try {
                InputStream responseStream = response.getEntity().getContent();
                switch (status) {
                case 200:
                case 201:
                    TrainingResponse trainingResponse = serializer.parse(responseStream,
                            TrainingResponse.class);
                    Model model = new Model();
                    model.setId(randomStringGenerator.nextString(20));
                    model.setActualModel(trainingResponse.getRawModel());
                    model.setPmmlModel(trainingResponse.getPmmlModel());
                    model.setAdditionalInfo(trainingResponse.getAdditionalInfo());
                    model.setAlgorithm(algorithm);
                    model.setParameters(parameters);
                    model.setDatasetUri(dataset != null ? dataset.getDatasetURI() : null);

                    // Check whether the independent features of the model exist in the dataset
                    List<String> filteredIndependedFeatures = new ArrayList<String>();
                    if (dataset != null && dataset.getFeatures() != null
                            && trainingResponse.getIndependentFeatures() != null)
                        for (String feature : trainingResponse.getIndependentFeatures()) {
                            for (FeatureInfo featureInfo : dataset.getFeatures()) {
                                if (feature.equals(featureInfo.getURI()))
                                    filteredIndependedFeatures.add(feature);
                            }
                        }

                    model.setIndependentFeatures(filteredIndependedFeatures);
                    model.setDependentFeatures(Arrays.asList(predictionFeature));
                    model.setMeta(modelMeta);

                    List<String> predictedFeatures = new ArrayList<>();
                    for (String featureTitle : trainingResponse.getPredictedFeatures()) {
                        Feature predictionFeatureResource = featureHandler.findByTitleAndSource(featureTitle,
                                "algorithm/" + algorithm.getId());
                        if (predictionFeatureResource == null) {
                            // Create the prediction features (POST /feature)
                            String predFeatID = randomStringGenerator.nextString(12);
                            predictionFeatureResource = new Feature();
                            predictionFeatureResource.setId(predFeatID);
                            predictionFeatureResource.setPredictorFor(predictionFeature);
                            predictionFeatureResource.setMeta(MetaInfoBuilder.builder()
                                    .addSources(/* messageBody.get("base_uri") + */"algorithm/" + algorithm.getId())
                                    .addComments("Feature created to hold predictions by algorithm with ID "
                                            + algorithm.getId())
                                    .addTitles(featureTitle)
                                    .addSeeAlso(predictionFeature)
                                    .addCreators(algorithm.getMeta().getCreators())
                                    .build());
                            /* Create feature */
                            featureHandler.create(predictionFeatureResource);
                        }
                        predictedFeatures.add(baseURI + "feature/" + predictionFeatureResource.getId());
                    }
                    model.setPredictedFeatures(predictedFeatures);

                    futureModel.complete(model);
                    break;
                case 400:
                    String message = new BufferedReader(new InputStreamReader(responseStream)).lines()
                            .collect(Collectors.joining("\n"));
                    futureModel.completeExceptionally(new BadRequestException(message));
                    break;
                case 500:
                    message = new BufferedReader(new InputStreamReader(responseStream)).lines()
                            .collect(Collectors.joining("\n"));
                    futureModel.completeExceptionally(new InternalServerErrorException(message));
                    break;
                default:
                    message = new BufferedReader(new InputStreamReader(responseStream)).lines()
                            .collect(Collectors.joining("\n"));
                    futureModel.completeExceptionally(new InternalServerErrorException(message));
                }
            } catch (IOException | UnsupportedOperationException ex) {
                futureModel.completeExceptionally(ex);
            }
        }

        @Override
        public void failed(final Exception ex) {
            futureMap.remove(taskId);
            futureModel.completeExceptionally(ex);
        }

        @Override
        public void cancelled() {
            futureMap.remove(taskId);
            futureModel.cancel(true);
        }
    });

    serializer.write(trainingRequest, out);
    try {
        out.close();
    } catch (IOException ex) {
        futureModel.completeExceptionally(ex);
    }
    futureMap.put(taskId, futureResponse);
    return futureModel;
}
From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java
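A chain of nested addListener callbacks validates both table names, copies the descriptor, and finally calls future.complete(result) once the clone has been created, with or without preserved splits.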
@Override
public CompletableFuture<Void> cloneTableSchema(TableName tableName, TableName newTableName,
    boolean preserveSplits) {
  CompletableFuture<Void> future = new CompletableFuture<>();
  addListener(tableExists(tableName), (exist, err) -> {
    if (err != null) {
      future.completeExceptionally(err);
      return;
    }
    if (!exist) {
      future.completeExceptionally(new TableNotFoundException(tableName));
      return;
    }
    addListener(tableExists(newTableName), (exist1, err1) -> {
      if (err1 != null) {
        future.completeExceptionally(err1);
        return;
      }
      if (exist1) {
        future.completeExceptionally(new TableExistsException(newTableName));
        return;
      }
      addListener(getDescriptor(tableName), (tableDesc, err2) -> {
        if (err2 != null) {
          future.completeExceptionally(err2);
          return;
        }
        TableDescriptor newTableDesc = TableDescriptorBuilder.copy(newTableName, tableDesc);
        if (preserveSplits) {
          addListener(getTableSplits(tableName), (splits, err3) -> {
            if (err3 != null) {
              future.completeExceptionally(err3);
            } else {
              addListener(
                splits != null ? createTable(newTableDesc, splits) : createTable(newTableDesc),
                (result, err4) -> {
                  if (err4 != null) {
                    future.completeExceptionally(err4);
                  } else {
                    future.complete(result);
                  }
                });
            }
          });
        } else {
          addListener(createTable(newTableDesc), (result, err5) -> {
            if (err5 != null) {
              future.completeExceptionally(err5);
            } else {
              future.complete(result);
            }
          });
        }
      });
    });
  });
  return future;
}
From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java
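This method fans out one split request per eligible region and completes the returned future only after CompletableFuture.allOf over all the split sub-futures succeeds.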
@Override
public CompletableFuture<Void> split(TableName tableName) {
  CompletableFuture<Void> future = new CompletableFuture<>();
  addListener(tableExists(tableName), (exist, error) -> {
    if (error != null) {
      future.completeExceptionally(error);
      return;
    }
    if (!exist) {
      future.completeExceptionally(new TableNotFoundException(tableName));
      return;
    }
    addListener(
      metaTable.scanAll(new Scan().setReadType(ReadType.PREAD).addFamily(HConstants.CATALOG_FAMILY)
        .withStartRow(MetaTableAccessor.getTableStartRowForMeta(tableName, QueryType.REGION))
        .withStopRow(MetaTableAccessor.getTableStopRowForMeta(tableName, QueryType.REGION))),
      (results, err2) -> {
        if (err2 != null) {
          future.completeExceptionally(err2);
          return;
        }
        if (results != null && !results.isEmpty()) {
          List<CompletableFuture<Void>> splitFutures = new ArrayList<>();
          for (Result r : results) {
            if (r.isEmpty() || MetaTableAccessor.getRegionInfo(r) == null) {
              continue;
            }
            RegionLocations rl = MetaTableAccessor.getRegionLocations(r);
            if (rl != null) {
              for (HRegionLocation h : rl.getRegionLocations()) {
                if (h != null && h.getServerName() != null) {
                  RegionInfo hri = h.getRegion();
                  if (hri == null || hri.isSplitParent()
                    || hri.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) {
                    continue;
                  }
                  splitFutures.add(split(hri, null));
                }
              }
            }
          }
          addListener(
            CompletableFuture.allOf(
              splitFutures.toArray(new CompletableFuture<?>[splitFutures.size()])),
            (ret, exception) -> {
              if (exception != null) {
                future.completeExceptionally(exception);
                return;
              }
              future.complete(ret);
            });
        } else {
          future.complete(null);
        }
      });
  });
  return future;
}
From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java
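Here complete marks the happy path of a multi-step restore: take a failsafe snapshot, restore the requested one, then delete the failsafe snapshot and complete the future, or roll back and complete it exceptionally.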
private CompletableFuture<Void> restoreSnapshot(String snapshotName, TableName tableName,
    boolean takeFailSafeSnapshot, boolean restoreAcl) {
  if (takeFailSafeSnapshot) {
    CompletableFuture<Void> future = new CompletableFuture<>();
    // Step.1 Take a snapshot of the current state
    String failSafeSnapshotSnapshotNameFormat =
      this.connection.getConfiguration().get(HConstants.SNAPSHOT_RESTORE_FAILSAFE_NAME,
        HConstants.DEFAULT_SNAPSHOT_RESTORE_FAILSAFE_NAME);
    final String failSafeSnapshotSnapshotName = failSafeSnapshotSnapshotNameFormat
      .replace("{snapshot.name}", snapshotName)
      .replace("{table.name}", tableName.toString().replace(TableName.NAMESPACE_DELIM, '.'))
      .replace("{restore.timestamp}", String.valueOf(EnvironmentEdgeManager.currentTime()));
    LOG.info("Taking restore-failsafe snapshot: " + failSafeSnapshotSnapshotName);
    addListener(snapshot(failSafeSnapshotSnapshotName, tableName), (ret, err) -> {
      if (err != null) {
        future.completeExceptionally(err);
      } else {
        // Step.2 Restore snapshot
        addListener(internalRestoreSnapshot(snapshotName, tableName, restoreAcl), (void2, err2) -> {
          if (err2 != null) {
            // Step.3.a Something went wrong during the restore and try to rollback.
            addListener(internalRestoreSnapshot(failSafeSnapshotSnapshotName, tableName, restoreAcl),
              (void3, err3) -> {
                if (err3 != null) {
                  future.completeExceptionally(err3);
                } else {
                  String msg = "Restore snapshot=" + snapshotName + " failed. Rollback to snapshot="
                    + failSafeSnapshotSnapshotName + " succeeded.";
                  future.completeExceptionally(new RestoreSnapshotException(msg, err2));
                }
              });
          } else {
            // Step.3.b If the restore is succeeded, delete the pre-restore snapshot.
            LOG.info("Deleting restore-failsafe snapshot: " + failSafeSnapshotSnapshotName);
            addListener(deleteSnapshot(failSafeSnapshotSnapshotName), (ret3, err3) -> {
              if (err3 != null) {
                LOG.error(
                  "Unable to remove the failsafe snapshot: " + failSafeSnapshotSnapshotName, err3);
                future.completeExceptionally(err3);
              } else {
                future.complete(ret3);
              }
            });
          }
        });
      }
    });
    return future;
  } else {
    return internalRestoreSnapshot(snapshotName, tableName, restoreAcl);
  }
}
From source file:org.apache.pulsar.broker.service.persistent.PersistentTopic.java
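A sequence of validation checks each short-circuits with completeExceptionally; only after the subscription future resolves is the Consumer constructed and handed to future.complete(consumer).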
@Override
public CompletableFuture<Consumer> subscribe(final ServerCnx cnx, String subscriptionName,
        long consumerId, SubType subType, int priorityLevel, String consumerName, boolean isDurable,
        MessageId startMessageId, Map<String, String> metadata, boolean readCompacted,
        InitialPosition initialPosition) {

    final CompletableFuture<Consumer> future = new CompletableFuture<>();

    try {
        brokerService.checkTopicNsOwnership(getName());
    } catch (Exception e) {
        future.completeExceptionally(e);
        return future;
    }

    if (readCompacted && !(subType == SubType.Failover || subType == SubType.Exclusive)) {
        future.completeExceptionally(
                new NotAllowedException("readCompacted only allowed on failover or exclusive subscriptions"));
        return future;
    }

    if (isBlank(subscriptionName)) {
        if (log.isDebugEnabled()) {
            log.debug("[{}] Empty subscription name", topic);
        }
        future.completeExceptionally(new NamingException("Empty subscription name"));
        return future;
    }

    if (hasBatchMessagePublished && !cnx.isBatchMessageCompatibleVersion()) {
        if (log.isDebugEnabled()) {
            log.debug("[{}] Consumer doesn't support batch-message {}", topic, subscriptionName);
        }
        future.completeExceptionally(new UnsupportedVersionException("Consumer doesn't support batch-message"));
        return future;
    }

    if (subscriptionName.startsWith(replicatorPrefix) || subscriptionName.equals(DEDUPLICATION_CURSOR_NAME)) {
        log.warn("[{}] Failed to create subscription for {}", topic, subscriptionName);
        future.completeExceptionally(
                new NamingException("Subscription with reserved subscription name attempted"));
        return future;
    }

    if (cnx.getRemoteAddress() != null && cnx.getRemoteAddress().toString().contains(":")) {
        SubscribeRateLimiter.ConsumerIdentifier consumer = new SubscribeRateLimiter.ConsumerIdentifier(
                cnx.getRemoteAddress().toString().split(":")[0], consumerName, consumerId);
        if (subscribeRateLimiter.isPresent() && !subscribeRateLimiter.get().subscribeAvailable(consumer)
                || !subscribeRateLimiter.get().tryAcquire(consumer)) {
            log.warn("[{}] Failed to create subscription for {} {} limited by {}, available {}", topic,
                    subscriptionName, consumer, subscribeRateLimiter.get().getSubscribeRate(),
                    subscribeRateLimiter.get().getAvailableSubscribeRateLimit(consumer));
            future.completeExceptionally(
                    new NotAllowedException("Subscribe limited by subscribe rate limit per consumer."));
            return future;
        }
    }

    lock.readLock().lock();
    try {
        if (isFenced) {
            log.warn("[{}] Attempting to subscribe to a fenced topic", topic);
            future.completeExceptionally(new TopicFencedException("Topic is temporarily unavailable"));
            return future;
        }
        USAGE_COUNT_UPDATER.incrementAndGet(this);
        if (log.isDebugEnabled()) {
            log.debug("[{}] [{}] [{}] Added consumer -- count: {}", topic, subscriptionName, consumerName,
                    USAGE_COUNT_UPDATER.get(this));
        }
    } finally {
        lock.readLock().unlock();
    }

    CompletableFuture<? extends Subscription> subscriptionFuture = isDurable //
            ? getDurableSubscription(subscriptionName, initialPosition) //
            : getNonDurableSubscription(subscriptionName, startMessageId);

    int maxUnackedMessages = isDurable
            ? brokerService.pulsar().getConfiguration().getMaxUnackedMessagesPerConsumer()
            : 0;

    subscriptionFuture.thenAccept(subscription -> {
        try {
            Consumer consumer = new Consumer(subscription, subType, topic, consumerId, priorityLevel,
                    consumerName, maxUnackedMessages, cnx, cnx.getRole(), metadata, readCompacted,
                    initialPosition);
            subscription.addConsumer(consumer);
            if (!cnx.isActive()) {
                consumer.close();
                if (log.isDebugEnabled()) {
                    log.debug("[{}] [{}] [{}] Subscribe failed -- count: {}", topic, subscriptionName,
                            consumer.consumerName(), USAGE_COUNT_UPDATER.get(PersistentTopic.this));
                }
                future.completeExceptionally(
                        new BrokerServiceException("Connection was closed while the opening the cursor "));
            } else {
                log.info("[{}][{}] Created new subscription for {}", topic, subscriptionName, consumerId);
                future.complete(consumer);
            }
        } catch (BrokerServiceException e) {
            if (e instanceof ConsumerBusyException) {
                log.warn("[{}][{}] Consumer {} {} already connected", topic, subscriptionName, consumerId,
                        consumerName);
            } else if (e instanceof SubscriptionBusyException) {
                log.warn("[{}][{}] {}", topic, subscriptionName, e.getMessage());
            }
            USAGE_COUNT_UPDATER.decrementAndGet(PersistentTopic.this);
            future.completeExceptionally(e);
        }
    }).exceptionally(ex -> {
        log.warn("[{}] Failed to create subscription for {}: ", topic, subscriptionName, ex.getMessage());
        USAGE_COUNT_UPDATER.decrementAndGet(PersistentTopic.this);
        future.completeExceptionally(new PersistenceException(ex));
        return null;
    });

    return future;
}
From source file:com.ikanow.aleph2.shared.crud.elasticsearch.services.ElasticsearchCrudService.java
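A stateful BiConsumer handles the Elasticsearch bulk response: documents that failed on mapping errors are retried with a second bulk request (re-invoking the same handler recursively), and the future is eventually completed with suppliers for the written ids and the write count.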
@Override
public CompletableFuture<Tuple2<Supplier<List<Object>>, Supplier<Long>>> storeObjects(
        final List<O> new_objects, final boolean replace_if_present) {
    try {
        final ReadWriteContext rw_context = getRwContextOrThrow(_state.es_context, "storeObjects");

        final BulkRequestBuilder brb = new_objects.stream()
                .reduce(_state.client.prepareBulk()
                            .setConsistencyLevel(WriteConsistencyLevel.ONE)
                            .setRefresh(CreationPolicy.AVAILABLE_IMMEDIATELY == _state.creation_policy),
                        (acc, val) -> acc.add(singleObjectIndexRequest(Either.left(rw_context),
                                Either.left(val), replace_if_present, true)),
                        (acc1, acc2) -> {
                            throw new RuntimeException("Internal logic error - Parallel not supported");
                        });

        final BiConsumer<BulkResponse, CompletableFuture<Tuple2<Supplier<List<Object>>, Supplier<Long>>>> action_handler =
                new BiConsumer<BulkResponse, CompletableFuture<Tuple2<Supplier<List<Object>>, Supplier<Long>>>>() {
            // WARNING: mutable/imperative code ahead...
            long _curr_written = 0;
            List<Object> _id_list = null;
            HashMap<String, String> _mapping_failures = null;

            @Override
            public void accept(final BulkResponse result,
                    final CompletableFuture<Tuple2<Supplier<List<Object>>, Supplier<Long>>> future) {
                if (result.hasFailures() && (rw_context
                        .typeContext() instanceof ElasticsearchContext.TypeContext.ReadWriteTypeContext.AutoRwTypeContext)) {
                    final ElasticsearchContext.TypeContext.ReadWriteTypeContext.AutoRwTypeContext auto_context =
                            (ElasticsearchContext.TypeContext.ReadWriteTypeContext.AutoRwTypeContext) rw_context
                                    .typeContext();
                    // Recursive builder in case I need to build a second batch of docs
                    BulkRequestBuilder brb2 = null;
                    if (null == _id_list) {
                        _id_list = new LinkedList<Object>();
                    }
                    HashMap<String, String> temp_mapping_failures = null;
                    final Iterator<BulkItemResponse> it = result.iterator();
                    while (it.hasNext()) {
                        final BulkItemResponse bir = it.next();
                        if (bir.isFailed()) {
                            if (bir.getFailure().getMessage().startsWith("MapperParsingException")) {
                                final Set<String> fixed_type_fields = rw_context.typeContext().fixed_type_fields();
                                if (!fixed_type_fields.isEmpty()) {
                                    // Obtain the field name from the exception (if we fail then drop the record)
                                    final String field = getFieldFromParsingException(bir.getFailure().getMessage());
                                    if ((null == field) || fixed_type_fields.contains(field)) {
                                        continue;
                                    }
                                } //(else roll on to...)
                                // OK this is the case where I might be able to apply auto types:
                                if (null == brb2) {
                                    brb2 = _state.client.prepareBulk()
                                            .setConsistencyLevel(WriteConsistencyLevel.ONE)
                                            .setRefresh(CreationPolicy.AVAILABLE_IMMEDIATELY == _state.creation_policy);
                                }
                                String failed_json = null;
                                if (null == _mapping_failures) {
                                    // first time through, use item id to grab the objects from the original request
                                    if (null == temp_mapping_failures) {
                                        temp_mapping_failures = new HashMap<String, String>();
                                    }
                                    final ActionRequest<?> ar = brb.request().requests().get(bir.getItemId());
                                    if (ar instanceof IndexRequest) {
                                        IndexRequest ir = (IndexRequest) ar;
                                        failed_json = ir.source().toUtf8();
                                        temp_mapping_failures.put(bir.getId(), failed_json);
                                    }
                                } else {
                                    // have already grabbed all the failure _ids and stuck in a map
                                    failed_json = _mapping_failures.get(bir.getId());
                                }
                                if (null != failed_json) {
                                    brb2.add(singleObjectIndexRequest(
                                            Either.right(Tuples._2T(bir.getIndex(),
                                                    ElasticsearchContextUtils.getNextAutoType(
                                                            auto_context.getPrefix(), bir.getType()))),
                                            Either.right(Tuples._2T(bir.getId(), failed_json)), false, true));
                                }
                            }
                            // Ugh otherwise just silently fail I guess?
                            //(should I also look for transient errors and resubmit them after a pause?!)
                        } else { // (this item worked)
                            _id_list.add(bir.getId());
                            _curr_written++;
                        }
                    }
                    if (null != brb2) { // found mapping errors to retry with
                        if (null == _mapping_failures) // (first level of recursion)
                            _mapping_failures = temp_mapping_failures;
                        // (note that if brb2.request().requests().isEmpty() this is an internal logic error,
                        //  so it's OK to throw)
                        ElasticsearchFutureUtils.wrap(brb2.execute(), future, this, (error, future2) -> {
                            future2.completeExceptionally(error);
                        });
                    } else { // relative success, plus we've built the list anyway
                        future.complete(Tuples._2T(() -> _id_list, () -> (Long) _curr_written));
                    }
                } else { // No errors with this iteration of the bulk request
                    _curr_written += result.getItems().length;
                    if (null == _id_list) {
                        // This is the first bulk request, no recursion on failures,
                        // so can lazily create the list in case it isn't needed
                        final Supplier<List<Object>> get_objects = () -> {
                            return StreamSupport.stream(result.spliterator(), false)
                                    .filter(bir -> !bir.isFailed()).map(bir -> bir.getId())
                                    .collect(Collectors.toList());
                        };
                        final Supplier<Long> get_count_workaround = () -> {
                            return StreamSupport.stream(result.spliterator(), false)
                                    .filter(bir -> !bir.isFailed()).collect(Collectors.counting());
                        };
                        get_count_workaround.get();
                        future.complete(Tuples._2T(get_objects, get_count_workaround));
                    } else {
                        // have already calculated everything so just return it
                        future.complete(Tuples._2T(() -> _id_list, () -> (Long) _curr_written));
                    }
                }
            }
        };

        return ElasticsearchFutureUtils.wrap(brb.execute(),
                new CompletableFuture<Tuple2<Supplier<List<Object>>, Supplier<Long>>>(), action_handler,
                (error, future) -> {
                    future.completeExceptionally(error);
                });
    } catch (Exception e) {
        return FutureUtils.returnError(e);
    }
}