Usage examples for java.util.concurrent.CompletableFuture.allOf
public static CompletableFuture<Void> allOf(CompletableFuture<?>... cfs)
From source file:org.apache.hadoop.hbase.client.AsyncHBaseAdmin.java
private CompletableFuture<HTableDescriptor[]> batchTableOperations(Pattern pattern, TableOperator operator, String operationType) {//from w w w . jav a 2 s . com CompletableFuture<HTableDescriptor[]> future = new CompletableFuture<>(); List<HTableDescriptor> failed = new LinkedList<>(); listTables(pattern, false).whenComplete((tables, error) -> { if (error != null) { future.completeExceptionally(error); return; } CompletableFuture[] futures = Arrays.stream(tables) .map((table) -> operator.operate(table.getTableName()).whenComplete((v, ex) -> { if (ex != null) { LOG.info("Failed to " + operationType + " table " + table.getTableName(), ex); failed.add(table); } })).toArray(size -> new CompletableFuture[size]); CompletableFuture.allOf(futures).thenAccept((v) -> { future.complete(failed.toArray(new HTableDescriptor[failed.size()])); }); }); return future; }
From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java
/**
 * Flushes every region hosted by the given region server. The returned future completes
 * once all per-region flushes have finished, or exceptionally on the first failure.
 *
 * @param sn the region server whose regions should be flushed
 */
@Override
public CompletableFuture<Void> flushRegionServer(ServerName sn) {
    CompletableFuture<Void> future = new CompletableFuture<>();
    addListener(getRegions(sn), (regions, err) -> {
        if (err != null) {
            future.completeExceptionally(err);
            return;
        }
        List<CompletableFuture<Void>> flushFutures = new ArrayList<>();
        if (regions != null) {
            regions.forEach(region -> flushFutures.add(flush(sn, region)));
        }
        CompletableFuture<Void> all =
                CompletableFuture.allOf(flushFutures.toArray(new CompletableFuture<?>[0]));
        addListener(all, (ret, err2) -> {
            if (err2 != null) {
                future.completeExceptionally(err2);
            } else {
                future.complete(ret);
            }
        });
    });
    return future;
}
From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java
private CompletableFuture<Void> compactRegionServer(ServerName sn, boolean major) { CompletableFuture<Void> future = new CompletableFuture<>(); addListener(getRegions(sn), (hRegionInfos, err) -> { if (err != null) { future.completeExceptionally(err); return; }//from w w w . j av a 2 s . c om List<CompletableFuture<Void>> compactFutures = new ArrayList<>(); if (hRegionInfos != null) { hRegionInfos.forEach(region -> compactFutures.add(compact(sn, region, major, null))); } addListener( CompletableFuture .allOf(compactFutures.toArray(new CompletableFuture<?>[compactFutures.size()])), (ret, err2) -> { if (err2 != null) { future.completeExceptionally(err2); } else { future.complete(ret); } }); }); return future; }
From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java
/** * Compact column family of a table, Asynchronous operation even if CompletableFuture.get() */// w w w . j a v a 2s . com private CompletableFuture<Void> compact(TableName tableName, byte[] columnFamily, boolean major, CompactType compactType) { CompletableFuture<Void> future = new CompletableFuture<>(); switch (compactType) { case MOB: addListener(connection.registry.getMasterAddress(), (serverName, err) -> { if (err != null) { future.completeExceptionally(err); return; } RegionInfo regionInfo = RegionInfo.createMobRegionInfo(tableName); addListener(compact(serverName, regionInfo, major, columnFamily), (ret, err2) -> { if (err2 != null) { future.completeExceptionally(err2); } else { future.complete(ret); } }); }); break; case NORMAL: addListener(getTableHRegionLocations(tableName), (locations, err) -> { if (err != null) { future.completeExceptionally(err); return; } if (locations == null || locations.isEmpty()) { future.completeExceptionally(new TableNotFoundException(tableName)); } CompletableFuture<?>[] compactFutures = locations.stream().filter(l -> l.getRegion() != null) .filter(l -> !l.getRegion().isOffline()).filter(l -> l.getServerName() != null) .map(l -> compact(l.getServerName(), l.getRegion(), major, columnFamily)) .toArray(CompletableFuture<?>[]::new); // future complete unless all of the compact futures are completed. addListener(CompletableFuture.allOf(compactFutures), (ret, err2) -> { if (err2 != null) { future.completeExceptionally(err2); } else { future.complete(ret); } }); }); break; default: throw new IllegalArgumentException("Unknown compactType: " + compactType); } return future; }
From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java
@Override public CompletableFuture<Void> split(TableName tableName) { CompletableFuture<Void> future = new CompletableFuture<>(); addListener(tableExists(tableName), (exist, error) -> { if (error != null) { future.completeExceptionally(error); return; }//from w ww. ja v a 2 s . c o m if (!exist) { future.completeExceptionally(new TableNotFoundException(tableName)); return; } addListener( metaTable.scanAll(new Scan().setReadType(ReadType.PREAD).addFamily(HConstants.CATALOG_FAMILY) .withStartRow(MetaTableAccessor.getTableStartRowForMeta(tableName, QueryType.REGION)) .withStopRow(MetaTableAccessor.getTableStopRowForMeta(tableName, QueryType.REGION))), (results, err2) -> { if (err2 != null) { future.completeExceptionally(err2); return; } if (results != null && !results.isEmpty()) { List<CompletableFuture<Void>> splitFutures = new ArrayList<>(); for (Result r : results) { if (r.isEmpty() || MetaTableAccessor.getRegionInfo(r) == null) { continue; } RegionLocations rl = MetaTableAccessor.getRegionLocations(r); if (rl != null) { for (HRegionLocation h : rl.getRegionLocations()) { if (h != null && h.getServerName() != null) { RegionInfo hri = h.getRegion(); if (hri == null || hri.isSplitParent() || hri.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) { continue; } splitFutures.add(split(hri, null)); } } } } addListener( CompletableFuture.allOf( splitFutures.toArray(new CompletableFuture<?>[splitFutures.size()])), (ret, exception) -> { if (exception != null) { future.completeExceptionally(exception); return; } future.complete(ret); }); } else { future.complete(null); } }); }); return future; }
From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java
/**
 * Deletes all completed snapshots matching the given patterns.
 *
 * @param tableNamePattern restricts deletion to snapshots of matching tables; when
 *          {@code null}, only the snapshot name pattern is applied
 * @param snapshotNamePattern restricts deletion to matching snapshot names
 * @return a future that completes once every matching snapshot has been deleted, or
 *         exceptionally on the first failure; completes immediately when nothing matches
 */
private CompletableFuture<Void> internalDeleteSnapshots(Pattern tableNamePattern, Pattern snapshotNamePattern) {
    // Choose the listing call depending on whether a table pattern was supplied.
    CompletableFuture<List<SnapshotDescription>> listSnapshotsFuture = tableNamePattern == null
            ? getCompletedSnapshots(snapshotNamePattern)
            : getCompletedSnapshots(tableNamePattern, snapshotNamePattern);
    CompletableFuture<Void> future = new CompletableFuture<>();
    addListener(listSnapshotsFuture, (snapshotDescriptions, err) -> {
        if (err != null) {
            future.completeExceptionally(err);
            return;
        }
        if (snapshotDescriptions == null || snapshotDescriptions.isEmpty()) {
            future.complete(null);
            return;
        }
        CompletableFuture<?>[] deletions = snapshotDescriptions.stream()
                .map(this::internalDeleteSnapshot)
                .toArray(CompletableFuture<?>[]::new);
        addListener(CompletableFuture.allOf(deletions), (v, e) -> {
            if (e != null) {
                future.completeExceptionally(e);
            } else {
                future.complete(v);
            }
        });
    });
    return future;
}
From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java
/**
 * Asks every live region server, the active master, and all backup masters to reload
 * their configuration. The returned future completes once all of them have responded,
 * or exceptionally on the first failure.
 */
@Override
public CompletableFuture<Void> updateConfiguration() {
    CompletableFuture<Void> future = new CompletableFuture<>();
    addListener(getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS, Option.MASTER, Option.BACKUP_MASTERS)),
            (status, err) -> {
                if (err != null) {
                    future.completeExceptionally(err);
                    return;
                }
                List<CompletableFuture<Void>> updateFutures = new ArrayList<>();
                status.getLiveServerMetrics().keySet()
                        .forEach(server -> updateFutures.add(updateConfiguration(server)));
                updateFutures.add(updateConfiguration(status.getMasterName()));
                status.getBackupMasterNames()
                        .forEach(master -> updateFutures.add(updateConfiguration(master)));
                CompletableFuture<Void> all =
                        CompletableFuture.allOf(updateFutures.toArray(new CompletableFuture<?>[0]));
                addListener(all, (result, err2) -> {
                    if (err2 != null) {
                        future.completeExceptionally(err2);
                    } else {
                        future.complete(result);
                    }
                });
            });
    return future;
}
From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java
/**
 * Gets the compaction state of a table.
 * <p>
 * For {@code MOB}, the state is read from the master-hosted MOB region. For
 * {@code NORMAL}, the per-region states of all online, deployed regions are queried
 * concurrently and aggregated: if any region reports MAJOR_AND_MINOR, or one region
 * reports MAJOR while another reports MINOR, the table state is MAJOR_AND_MINOR;
 * otherwise the strongest single state seen (MAJOR, MINOR, or NONE) is returned.
 *
 * @param tableName table to query
 * @param compactType MOB or NORMAL
 * @throws IllegalArgumentException if {@code compactType} is unknown
 */
@Override
public CompletableFuture<CompactionState> getCompactionState(TableName tableName, CompactType compactType) {
    CompletableFuture<CompactionState> future = new CompletableFuture<>();
    switch (compactType) {
        case MOB:
            // The MOB region lives on the master, so resolve the master address first.
            addListener(connection.registry.getMasterAddress(), (serverName, err) -> {
                if (err != null) {
                    future.completeExceptionally(err);
                    return;
                }
                RegionInfo regionInfo = RegionInfo.createMobRegionInfo(tableName);
                addListener(this.<GetRegionInfoResponse>newAdminCaller().serverName(serverName)
                        .action((controller, stub) -> this
                                .<GetRegionInfoRequest, GetRegionInfoResponse, GetRegionInfoResponse>adminCall(
                                        controller, stub,
                                        RequestConverter.buildGetRegionInfoRequest(regionInfo.getRegionName(),
                                                true),
                                        (s, c, req, done) -> s.getRegionInfo(controller, req, done),
                                        resp -> resp))
                        .call(), (resp2, err2) -> {
                            if (err2 != null) {
                                future.completeExceptionally(err2);
                            } else {
                                // Missing compaction state in the response means "no compaction".
                                if (resp2.hasCompactionState()) {
                                    future.complete(ProtobufUtil.createCompactionState(resp2.getCompactionState()));
                                } else {
                                    future.complete(CompactionState.NONE);
                                }
                            }
                        });
            });
            break;
        case NORMAL:
            addListener(getTableHRegionLocations(tableName), (locations, err) -> {
                if (err != null) {
                    future.completeExceptionally(err);
                    return;
                }
                // Collected from callbacks that may run concurrently, hence a
                // concurrent queue.
                ConcurrentLinkedQueue<CompactionState> regionStates = new ConcurrentLinkedQueue<>();
                List<CompletableFuture<CompactionState>> futures = new ArrayList<>();
                // Query only regions that are deployed and online.
                locations.stream().filter(loc -> loc.getServerName() != null)
                        .filter(loc -> loc.getRegion() != null)
                        .filter(loc -> !loc.getRegion().isOffline())
                        .map(loc -> loc.getRegion().getRegionName()).forEach(region -> {
                            futures.add(getCompactionStateForRegion(region).whenComplete((regionState, err2) -> {
                                // If any region compaction state is MAJOR_AND_MINOR,
                                // the table compaction state is MAJOR_AND_MINOR too —
                                // short-circuit by completing the future immediately.
                                if (err2 != null) {
                                    future.completeExceptionally(unwrapCompletionException(err2));
                                } else if (regionState == CompactionState.MAJOR_AND_MINOR) {
                                    future.complete(regionState);
                                } else {
                                    regionStates.add(regionState);
                                }
                            }));
                        });
                addListener(
                        CompletableFuture.allOf(futures.toArray(new CompletableFuture<?>[futures.size()])),
                        (ret, err3) -> {
                            // If the future was not already completed (by an error or a
                            // MAJOR_AND_MINOR short-circuit), aggregate all per-region states.
                            if (!future.isCompletedExceptionally() && !future.isDone()) {
                                CompactionState state = CompactionState.NONE;
                                for (CompactionState regionState : regionStates) {
                                    switch (regionState) {
                                        case MAJOR:
                                            // MAJOR in one region + MINOR in another
                                            // means MAJOR_AND_MINOR for the table.
                                            if (state == CompactionState.MINOR) {
                                                future.complete(CompactionState.MAJOR_AND_MINOR);
                                            } else {
                                                state = CompactionState.MAJOR;
                                            }
                                            break;
                                        case MINOR:
                                            if (state == CompactionState.MAJOR) {
                                                future.complete(CompactionState.MAJOR_AND_MINOR);
                                            } else {
                                                state = CompactionState.MINOR;
                                            }
                                            break;
                                        case NONE:
                                        default:
                                            // NONE never overrides a stronger state.
                                    }
                                }
                                // Note: once future is completed above, further
                                // complete() calls are no-ops, so finishing the loop
                                // is harmless.
                                if (!future.isDone()) {
                                    future.complete(state);
                                }
                            }
                        });
            });
            break;
        default:
            throw new IllegalArgumentException("Unknown compactType: " + compactType);
    }
    return future;
}
From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java
@Override public CompletableFuture<Map<ServerName, Boolean>> compactionSwitch(boolean switchState, List<String> serverNamesList) { CompletableFuture<Map<ServerName, Boolean>> future = new CompletableFuture<>(); addListener(getRegionServerList(serverNamesList), (serverNames, err) -> { if (err != null) { future.completeExceptionally(err); return; }/*www. ja va 2 s . c o m*/ // Accessed by multiple threads. Map<ServerName, Boolean> serverStates = new ConcurrentHashMap<>(serverNames.size()); List<CompletableFuture<Boolean>> futures = new ArrayList<>(serverNames.size()); serverNames.stream().forEach(serverName -> { futures.add(switchCompact(serverName, switchState).whenComplete((serverState, err2) -> { if (err2 != null) { future.completeExceptionally(unwrapCompletionException(err2)); } else { serverStates.put(serverName, serverState); } })); }); addListener(CompletableFuture.allOf(futures.toArray(new CompletableFuture<?>[futures.size()])), (ret, err3) -> { if (!future.isCompletedExceptionally()) { if (err3 != null) { future.completeExceptionally(err3); } else { future.complete(serverStates); } } }); }); return future; }
From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java
/** * Connect to peer and check the table descriptor on peer: * <ol>/* w w w . j av a 2 s .c o m*/ * <li>Create the same table on peer when not exist.</li> * <li>Throw an exception if the table already has replication enabled on any of the column * families.</li> * <li>Throw an exception if the table exists on peer cluster but descriptors are not same.</li> * </ol> * @param tableName name of the table to sync to the peer * @param splits table split keys */ private CompletableFuture<Void> checkAndSyncTableToPeerClusters(TableName tableName, byte[][] splits) { CompletableFuture<Void> future = new CompletableFuture<>(); addListener(listReplicationPeers(), (peers, err) -> { if (err != null) { future.completeExceptionally(err); return; } if (peers == null || peers.size() <= 0) { future.completeExceptionally( new IllegalArgumentException("Found no peer cluster for replication.")); return; } List<CompletableFuture<Void>> futures = new ArrayList<>(); peers.stream().filter(peer -> peer.getPeerConfig().needToReplicate(tableName)).forEach(peer -> { futures.add(trySyncTableToPeerCluster(tableName, splits, peer)); }); addListener(CompletableFuture.allOf(futures.toArray(new CompletableFuture<?>[futures.size()])), (result, err2) -> { if (err2 != null) { future.completeExceptionally(err2); } else { future.complete(result); } }); }); return future; }