List of usage examples for java.util.concurrent CompletableFuture complete
public boolean complete(T value)
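If the future is not already completed, complete(value) sets the value returned by get() and related methods and returns true; otherwise it is a no-op and returns false. A minimal self-contained sketch of that contract (class and variable names are illustrative):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;

public class CompleteDemo {
  public static void main(String[] args) throws InterruptedException, ExecutionException {
    CompletableFuture<String> future = new CompletableFuture<>();

    // A worker thread produces the value and completes the future manually.
    new Thread(() -> {
      // Only the call that actually transitions the future to completed returns true.
      boolean transitioned = future.complete("hello");
      System.out.println("completed by worker: " + transitioned);
    }).start();

    // get() blocks until some thread calls complete() or completeExceptionally().
    System.out.println("value: " + future.get());

    // A second complete() is a no-op and returns false.
    System.out.println("second complete: " + future.complete("ignored"));
  }
}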
From source file: org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

@Override
public CompletableFuture<Void> move(byte[] regionName, ServerName destServerName) {
  Preconditions.checkNotNull(destServerName,
    "destServerName is null. If you don't specify a destServerName, use move(byte[]) instead");
  CompletableFuture<Void> future = new CompletableFuture<>();
  addListener(getRegionInfo(regionName), (regionInfo, err) -> {
    if (err != null) {
      future.completeExceptionally(err);
      return;
    }
    addListener(
      moveRegion(regionInfo,
        RequestConverter.buildMoveRegionRequest(regionInfo.getEncodedNameAsBytes(), destServerName)),
      (ret, err2) -> {
        if (err2 != null) {
          future.completeExceptionally(err2);
        } else {
          future.complete(ret);
        }
      });
  });
  return future;
}
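Most of the RawAsyncHBaseAdmin examples on this page share one shape: create a fresh CompletableFuture, register a (result, error) callback on an inner async call, and route the outcome into the outer future with complete or completeExceptionally. Below is a minimal self-contained sketch of that shape; addListener is a simplified stand-in for the helper the HBase code uses, and stepOne/stepTwo are hypothetical stand-ins for calls like getRegionInfo and moveRegion:

import java.util.concurrent.CompletableFuture;
import java.util.function.BiConsumer;

public class ChainDemo {
  // Simplified stand-in for the addListener helper used above: attach a
  // (result, error) callback to an existing future.
  static <T> void addListener(CompletableFuture<T> future,
      BiConsumer<? super T, ? super Throwable> action) {
    future.whenComplete(action);
  }

  // Hypothetical async steps, standing in for getRegionInfo / moveRegion.
  static CompletableFuture<String> stepOne() {
    return CompletableFuture.supplyAsync(() -> "region-1");
  }

  static CompletableFuture<Void> stepTwo(String region) {
    return CompletableFuture.runAsync(() -> System.out.println("moving " + region));
  }

  // Same shape as move() above: chain two async calls into one returned future.
  static CompletableFuture<Void> chained() {
    CompletableFuture<Void> future = new CompletableFuture<>();
    addListener(stepOne(), (region, err) -> {
      if (err != null) {
        future.completeExceptionally(err);
        return;
      }
      addListener(stepTwo(region), (ret, err2) -> {
        if (err2 != null) {
          future.completeExceptionally(err2);
        } else {
          future.complete(ret);
        }
      });
    });
    return future;
  }

  public static void main(String[] args) {
    chained().join();
  }
}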
From source file: org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

/**
 * Get the region location for the passed region name. The region name may be a full region name
 * or encoded region name. If the region is not found, an UnknownRegionException wrapped by a
 * {@link CompletableFuture} is thrown.
 * @param regionNameOrEncodedRegionName region name or encoded region name
 * @return region location, wrapped by a {@link CompletableFuture}
 */
@VisibleForTesting
CompletableFuture<HRegionLocation> getRegionLocation(byte[] regionNameOrEncodedRegionName) {
  if (regionNameOrEncodedRegionName == null) {
    return failedFuture(new IllegalArgumentException("Passed region name can't be null"));
  }
  try {
    CompletableFuture<Optional<HRegionLocation>> future;
    if (RegionInfo.isEncodedRegionName(regionNameOrEncodedRegionName)) {
      String encodedName = Bytes.toString(regionNameOrEncodedRegionName);
      if (encodedName.length() < RegionInfo.MD5_HEX_LENGTH) {
        // old format encodedName, should be meta region
        future = connection.registry.getMetaRegionLocation()
          .thenApply(locs -> Stream.of(locs.getRegionLocations())
            .filter(loc -> loc.getRegion().getEncodedName().equals(encodedName))
            .findFirst());
      } else {
        future = AsyncMetaTableAccessor.getRegionLocationWithEncodedName(metaTable,
          regionNameOrEncodedRegionName);
      }
    } else {
      RegionInfo regionInfo =
        MetaTableAccessor.parseRegionInfoFromRegionName(regionNameOrEncodedRegionName);
      if (regionInfo.isMetaRegion()) {
        future = connection.registry.getMetaRegionLocation()
          .thenApply(locs -> Stream.of(locs.getRegionLocations())
            .filter(loc -> loc.getRegion().getReplicaId() == regionInfo.getReplicaId())
            .findFirst());
      } else {
        future = AsyncMetaTableAccessor.getRegionLocation(metaTable, regionNameOrEncodedRegionName);
      }
    }
    CompletableFuture<HRegionLocation> returnedFuture = new CompletableFuture<>();
    addListener(future, (location, err) -> {
      if (err != null) {
        returnedFuture.completeExceptionally(err);
        return;
      }
      if (!location.isPresent() || location.get().getRegion() == null) {
        returnedFuture.completeExceptionally(
          new UnknownRegionException("Invalid region name or encoded region name: " +
            Bytes.toStringBinary(regionNameOrEncodedRegionName)));
      } else {
        returnedFuture.complete(location.get());
      }
    });
    return returnedFuture;
  } catch (IOException e) {
    return failedFuture(e);
  }
}
From source file: org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

@Override
public CompletableFuture<Void> disableTableReplication(TableName tableName) {
  if (tableName == null) {
    return failedFuture(new IllegalArgumentException("Table name is null"));
  }
  CompletableFuture<Void> future = new CompletableFuture<>();
  addListener(tableExists(tableName), (exist, err) -> {
    if (err != null) {
      future.completeExceptionally(err);
      return;
    }
    if (!exist) {
      future.completeExceptionally(
        new TableNotFoundException("Table '" + tableName.getNameAsString() + "' does not exist."));
      return;
    }
    addListener(setTableReplication(tableName, false), (result, err2) -> {
      if (err2 != null) {
        future.completeExceptionally(err2);
      } else {
        future.complete(result);
      }
    });
  });
  return future;
}
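Several examples here short-circuit bad arguments through a failedFuture(...) helper. A plausible minimal implementation, built on completeExceptionally, is sketched below; on Java 9+ the built-in CompletableFuture.failedFuture(Throwable) does the same thing:

import java.util.concurrent.CompletableFuture;

public class FailedFutureDemo {
  // Plausible implementation of the failedFuture() helper used in these examples.
  static <T> CompletableFuture<T> failedFuture(Throwable error) {
    CompletableFuture<T> future = new CompletableFuture<>();
    future.completeExceptionally(error);
    return future;
  }

  public static void main(String[] args) {
    CompletableFuture<Void> f = failedFuture(new IllegalArgumentException("Table name is null"));
    // The callback runs immediately because the future is already completed.
    f.whenComplete((v, err) -> System.out.println("failed with: " + err));
  }
}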
From source file: org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

/**
 * Compact a column family of a table. The compaction itself remains an asynchronous
 * server-side operation, even if CompletableFuture.get() is called.
 */
private CompletableFuture<Void> compact(TableName tableName, byte[] columnFamily, boolean major,
    CompactType compactType) {
  CompletableFuture<Void> future = new CompletableFuture<>();
  switch (compactType) {
    case MOB:
      addListener(connection.registry.getMasterAddress(), (serverName, err) -> {
        if (err != null) {
          future.completeExceptionally(err);
          return;
        }
        RegionInfo regionInfo = RegionInfo.createMobRegionInfo(tableName);
        addListener(compact(serverName, regionInfo, major, columnFamily), (ret, err2) -> {
          if (err2 != null) {
            future.completeExceptionally(err2);
          } else {
            future.complete(ret);
          }
        });
      });
      break;
    case NORMAL:
      addListener(getTableHRegionLocations(tableName), (locations, err) -> {
        if (err != null) {
          future.completeExceptionally(err);
          return;
        }
        if (locations == null || locations.isEmpty()) {
          future.completeExceptionally(new TableNotFoundException(tableName));
          return;
        }
        CompletableFuture<?>[] compactFutures = locations.stream()
          .filter(l -> l.getRegion() != null)
          .filter(l -> !l.getRegion().isOffline())
          .filter(l -> l.getServerName() != null)
          .map(l -> compact(l.getServerName(), l.getRegion(), major, columnFamily))
          .toArray(CompletableFuture<?>[]::new);
        // The returned future completes only after all of the compact futures have completed.
        addListener(CompletableFuture.allOf(compactFutures), (ret, err2) -> {
          if (err2 != null) {
            future.completeExceptionally(err2);
          } else {
            future.complete(ret);
          }
        });
      });
      break;
    default:
      throw new IllegalArgumentException("Unknown compactType: " + compactType);
  }
  return future;
}
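In the NORMAL branch above, one compact future is created per region and CompletableFuture.allOf bridges them into the single returned future, which completes only after every per-region future has. A self-contained sketch of that aggregation pattern, with hypothetical per-region subtasks in place of the real compact calls:

import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

public class AllOfDemo {
  public static void main(String[] args) {
    // Hypothetical per-region subtasks, standing in for the real compact calls.
    List<CompletableFuture<Void>> tasks = IntStream.range(0, 3)
      .mapToObj(i -> CompletableFuture.runAsync(() -> System.out.println("compacting region " + i)))
      .collect(Collectors.toList());

    CompletableFuture<Void> future = new CompletableFuture<>();

    // allOf() completes when every subtask has; bridge its outcome into `future`.
    CompletableFuture.allOf(tasks.toArray(new CompletableFuture<?>[0]))
      .whenComplete((ret, err) -> {
        if (err != null) {
          future.completeExceptionally(err);
        } else {
          future.complete(ret); // ret is always null for allOf()
        }
      });

    future.join();
    System.out.println("all regions compacted");
  }
}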
From source file: org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

/**
 * List all region locations for the specific table.
 */
private CompletableFuture<List<HRegionLocation>> getTableHRegionLocations(TableName tableName) {
  if (TableName.META_TABLE_NAME.equals(tableName)) {
    CompletableFuture<List<HRegionLocation>> future = new CompletableFuture<>();
    // For the meta table, we use zk to fetch all locations.
    AsyncRegistry registry = AsyncRegistryFactory.getRegistry(connection.getConfiguration());
    addListener(registry.getMetaRegionLocation(), (metaRegions, err) -> {
      if (err != null) {
        future.completeExceptionally(err);
      } else if (metaRegions == null || metaRegions.isEmpty() ||
          metaRegions.getDefaultRegionLocation() == null) {
        future.completeExceptionally(new IOException("meta region not found"));
      } else {
        future.complete(Collections.singletonList(metaRegions.getDefaultRegionLocation()));
      }
      // close the registry.
      IOUtils.closeQuietly(registry);
    });
    return future;
  } else {
    // For a non-meta table, we fetch all locations by scanning the hbase:meta table.
    return AsyncMetaTableAccessor.getTableHRegionLocations(metaTable, Optional.of(tableName));
  }
}
From source file: org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

@Override
public CompletableFuture<Void> flushRegion(byte[] regionName) {
  CompletableFuture<Void> future = new CompletableFuture<>();
  addListener(getRegionLocation(regionName), (location, err) -> {
    if (err != null) {
      future.completeExceptionally(err);
      return;
    }
    ServerName serverName = location.getServerName();
    if (serverName == null) {
      future.completeExceptionally(new NoServerForRegionException(Bytes.toStringBinary(regionName)));
      return;
    }
    addListener(flush(serverName, location.getRegion()), (ret, err2) -> {
      if (err2 != null) {
        future.completeExceptionally(err2);
      } else {
        future.complete(ret);
      }
    });
  });
  return future;
}
From source file: org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl.java

CompletableFuture<ReadHandle> getLedgerHandle(long ledgerId) {
  CompletableFuture<ReadHandle> ledgerHandle = ledgerCache.get(ledgerId);
  if (ledgerHandle != null) {
    return ledgerHandle;
  }

  // If not present try again and create if necessary
  return ledgerCache.computeIfAbsent(ledgerId, lid -> {
    // Open the ledger for reading if it was not already opened
    if (log.isDebugEnabled()) {
      log.debug("[{}] Asynchronously opening ledger {} for read", name, ledgerId);
    }
    mbean.startDataLedgerOpenOp();

    CompletableFuture<ReadHandle> promise = new CompletableFuture<>();

    LedgerInfo info = ledgers.get(ledgerId);
    CompletableFuture<ReadHandle> openFuture;
    if (info != null && info.hasOffloadContext() && info.getOffloadContext().getComplete()) {
      UUID uid = new UUID(info.getOffloadContext().getUidMsb(), info.getOffloadContext().getUidLsb());
      // TODO: improve this to load ledger offloader by driver name recorded in metadata
      openFuture = config.getLedgerOffloader().readOffloaded(ledgerId, uid,
        OffloadUtils.getOffloadDriverMetadata(info));
    } else {
      openFuture = bookKeeper.newOpenLedgerOp().withRecovery(!isReadOnly()).withLedgerId(ledgerId)
        .withDigestType(config.getDigestType()).withPassword(config.getPassword()).execute();
    }
    openFuture.whenCompleteAsync((res, ex) -> {
      mbean.endDataLedgerOpenOp();
      if (ex != null) {
        ledgerCache.remove(ledgerId, promise);
        promise.completeExceptionally(createManagedLedgerException(ex));
      } else {
        if (log.isDebugEnabled()) {
          log.debug("[{}] Successfully opened ledger {} for reading", name, ledgerId);
        }
        promise.complete(res);
      }
    }, executor.chooseThread(name));
    return promise;
  });
}
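The ManagedLedgerImpl example caches the promise itself in the map via computeIfAbsent, and on failure evicts exactly that promise with remove(key, promise) so a later caller can retry. A stripped-down sketch of the same caching pattern; openResource is a hypothetical stand-in for the BookKeeper open operation:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;

public class FutureCacheDemo {
  private final ConcurrentHashMap<Long, CompletableFuture<String>> cache = new ConcurrentHashMap<>();

  // Hypothetical expensive async open, standing in for the BookKeeper open op.
  private CompletableFuture<String> openResource(long id) {
    return CompletableFuture.supplyAsync(() -> "resource-" + id);
  }

  CompletableFuture<String> get(long id) {
    return cache.computeIfAbsent(id, key -> {
      CompletableFuture<String> promise = new CompletableFuture<>();
      openResource(key).whenComplete((res, ex) -> {
        if (ex != null) {
          // Evict the failed promise so a later call can retry the open.
          cache.remove(key, promise);
          promise.completeExceptionally(ex);
        } else {
          promise.complete(res);
        }
      });
      return promise;
    });
  }

  public static void main(String[] args) {
    FutureCacheDemo demo = new FutureCacheDemo();
    System.out.println(demo.get(1L).join()); // triggers the open
    System.out.println(demo.get(1L).join()); // served from the cache
  }
}

Caching the promise rather than the raw open future is what makes the two-argument remove(key, promise) safe: only the exact entry that failed is evicted, never a newer retry.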
From source file: org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

@Override
public CompletableFuture<Void> removeReplicationPeerTableCFs(String id,
    Map<TableName, List<String>> tableCfs) {
  if (tableCfs == null) {
    return failedFuture(new ReplicationException("tableCfs is null"));
  }
  CompletableFuture<Void> future = new CompletableFuture<Void>();
  addListener(getReplicationPeerConfig(id), (peerConfig, error) -> {
    if (!completeExceptionally(future, error)) {
      ReplicationPeerConfig newPeerConfig = null;
      try {
        newPeerConfig =
          ReplicationPeerConfigUtil.removeTableCFsFromReplicationPeerConfig(tableCfs, peerConfig, id);
      } catch (ReplicationException e) {
        future.completeExceptionally(e);
        return;
      }
      addListener(updateReplicationPeerConfig(id, newPeerConfig), (result, err) -> {
        // Check err (the update's own error), not the outer error, which is known to be null here.
        if (!completeExceptionally(future, err)) {
          future.complete(result);
        }
      });
    }
  });
  return future;
}
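Judging from its usage above, the completeExceptionally(future, error) helper fails the future when error is non-null and returns whether it did so. A minimal sketch under that assumption:

import java.util.concurrent.CompletableFuture;

public class CompleteExceptionallyHelper {
  // Fail the future if error is non-null and report whether it did, so callers
  // can write: if (!completeExceptionally(future, error)) { ...continue... }
  static boolean completeExceptionally(CompletableFuture<?> future, Throwable error) {
    if (error != null) {
      future.completeExceptionally(error);
      return true;
    }
    return false;
  }

  public static void main(String[] args) {
    CompletableFuture<Void> future = new CompletableFuture<>();
    System.out.println(completeExceptionally(future, null));                  // false: keep going
    System.out.println(completeExceptionally(future, new Exception("boom"))); // true: future failed
  }
}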
From source file: com.yahoo.pulsar.broker.service.BrokerService.java

public CompletableFuture<ManagedLedgerConfig> getManagedLedgerConfig(DestinationName topicName) {
  CompletableFuture<ManagedLedgerConfig> future = new CompletableFuture<>();
  // Execute in background thread, since getting the policies might block if the z-node wasn't already cached
  pulsar.getOrderedExecutor().submitOrdered(topicName, safeRun(() -> {
    NamespaceName namespace = topicName.getNamespaceObject();
    ServiceConfiguration serviceConfig = pulsar.getConfiguration();

    // Get persistence policy for this destination
    Policies policies;
    try {
      policies = pulsar.getConfigurationCache().policiesCache()
        .get(AdminResource.path("policies", namespace.getProperty(), namespace.getCluster(),
          namespace.getLocalName()))
        .orElse(null);
    } catch (Throwable t) {
      // Could not read the policies; fail the future instead of silently applying defaults
      log.warn("Got exception when reading persistence policy for {}: {}", topicName, t.getMessage(), t);
      future.completeExceptionally(t);
      return;
    }

    PersistencePolicies persistencePolicies = policies != null ? policies.persistence : null;
    RetentionPolicies retentionPolicies = policies != null ? policies.retention_policies : null;

    if (persistencePolicies == null) {
      // Apply default values
      persistencePolicies = new PersistencePolicies(serviceConfig.getManagedLedgerDefaultEnsembleSize(),
        serviceConfig.getManagedLedgerDefaultWriteQuorum(),
        serviceConfig.getManagedLedgerDefaultAckQuorum(),
        serviceConfig.getManagedLedgerDefaultMarkDeleteRateLimit());
    }
    if (retentionPolicies == null) {
      retentionPolicies = new RetentionPolicies(serviceConfig.getDefaultRetentionTimeInMinutes(),
        serviceConfig.getDefaultRetentionSizeInMB());
    }

    ManagedLedgerConfig config = new ManagedLedgerConfig();
    config.setEnsembleSize(persistencePolicies.getBookkeeperEnsemble());
    config.setWriteQuorumSize(persistencePolicies.getBookkeeperWriteQuorum());
    config.setAckQuorumSize(persistencePolicies.getBookkeeperAckQuorum());
    config.setThrottleMarkDelete(persistencePolicies.getManagedLedgerMaxMarkDeleteRate());
    config.setDigestType(DigestType.CRC32);
    config.setMaxEntriesPerLedger(serviceConfig.getManagedLedgerMaxEntriesPerLedger());
    config.setMinimumRolloverTime(serviceConfig.getManagedLedgerMinLedgerRolloverTimeMinutes(), TimeUnit.MINUTES);
    config.setMaximumRolloverTime(serviceConfig.getManagedLedgerMaxLedgerRolloverTimeMinutes(), TimeUnit.MINUTES);
    config.setMaxSizePerLedgerMb(2048);
    config.setMetadataEnsembleSize(serviceConfig.getManagedLedgerDefaultEnsembleSize());
    config.setMetadataWriteQuorumSize(serviceConfig.getManagedLedgerDefaultWriteQuorum());
    config.setMetadataAckQuorumSize(serviceConfig.getManagedLedgerDefaultAckQuorum());
    config.setMetadataMaxEntriesPerLedger(serviceConfig.getManagedLedgerCursorMaxEntriesPerLedger());
    config.setLedgerRolloverTimeout(serviceConfig.getManagedLedgerCursorRolloverTimeInSeconds());
    config.setRetentionTime(retentionPolicies.getRetentionTimeInMinutes(), TimeUnit.MINUTES);
    config.setRetentionSizeInMB(retentionPolicies.getRetentionSizeInMB());

    future.complete(config);
  }, (exception) -> future.completeExceptionally(exception)));
  return future;
}
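The BrokerService example completes the future manually from a task submitted to a background executor, so the caller never blocks on the potentially slow policy lookup. A minimal sketch of the same shape, assuming a plain ExecutorService in place of Pulsar's ordered executor and a placeholder value in place of ManagedLedgerConfig:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class ExecutorBridgeDemo {
  public static void main(String[] args) {
    ExecutorService executor = Executors.newSingleThreadExecutor();
    CompletableFuture<Integer> future = new CompletableFuture<>();

    // Run the potentially blocking work on a background thread and complete
    // the future manually, mirroring the shape of getManagedLedgerConfig() above.
    executor.submit(() -> {
      try {
        int config = 42; // placeholder for building the real config object
        future.complete(config);
      } catch (Throwable t) {
        future.completeExceptionally(t); // propagate any failure to callers
      }
    });

    System.out.println("config: " + future.join());
    executor.shutdown();
  }
}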
From source file: org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

/**
 * Set the table's replication switch if it is not already in the desired state.
 * @param tableName name of the table
 * @param enableRep whether to enable or disable the replication switch
 */
private CompletableFuture<Void> setTableReplication(TableName tableName, boolean enableRep) {
  CompletableFuture<Void> future = new CompletableFuture<>();
  addListener(getDescriptor(tableName), (tableDesc, err) -> {
    if (err != null) {
      future.completeExceptionally(err);
      return;
    }
    if (!tableDesc.matchReplicationScope(enableRep)) {
      int scope = enableRep ? HConstants.REPLICATION_SCOPE_GLOBAL : HConstants.REPLICATION_SCOPE_LOCAL;
      TableDescriptor newTableDesc =
        TableDescriptorBuilder.newBuilder(tableDesc).setReplicationScope(scope).build();
      addListener(modifyTable(newTableDesc), (result, err2) -> {
        if (err2 != null) {
          future.completeExceptionally(err2);
        } else {
          future.complete(result);
        }
      });
    } else {
      future.complete(null);
    }
  });
  return future;
}