List of usage examples for java.io.InterruptedIOException
public InterruptedIOException(String s)
Constructs an InterruptedIOException with the specified detail message.
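A recurring pattern in the examples below is to catch InterruptedException around a blocking call and rethrow it as an InterruptedIOException, often chaining the original exception via initCause. Here is a minimal, self-contained sketch of that pattern; the class name and the blocking queue are hypothetical stand-ins for whatever interruptible wait the real code performs:

import java.io.InterruptedIOException;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class InterruptedIOExceptionSketch {
  private final BlockingQueue<byte[]> pending = new LinkedBlockingQueue<byte[]>();

  // Exposes an interruptible wait through an I/O-flavored API: callers only
  // need to handle IOException, of which InterruptedIOException is a subtype.
  public byte[] takeNextBlock() throws InterruptedIOException {
    try {
      return pending.take(); // blocks until an element is available
    } catch (InterruptedException e) {
      // Restore the interrupt flag so callers further up can still observe it.
      Thread.currentThread().interrupt();
      InterruptedIOException iioe =
          new InterruptedIOException("Interrupted while waiting for data");
      iioe.initCause(e); // keep the original InterruptedException as the cause
      throw iioe;
    }
  }
}

From source file: org.apache.hadoop.fs.s3a.S3AFileSystem.java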
private void createEmptyObject(final String objectName)
    throws AmazonClientException, AmazonServiceException, InterruptedIOException {
  final InputStream im = new InputStream() {
    @Override
    public int read() throws IOException {
      return -1;
    }
  };
  PutObjectRequest putObjectRequest = newPutObjectRequest(objectName, newObjectMetadata(0L), im);
  Upload upload = putObject(putObjectRequest);
  try {
    upload.waitForUploadResult();
  } catch (InterruptedException e) {
    throw new InterruptedIOException("Interrupted creating " + objectName);
  }
  incrementPutProgressStatistics(objectName, 0);
  instrumentation.directoryCreated();
}
From source file: org.apache.hadoop.hbase.client.HTable.java

/**
 * {@inheritDoc}
 */
@Override
public <T extends Service, R> void coprocessorService(final Class<T> service, byte[] startKey,
    byte[] endKey, final Batch.Call<T, R> callable, final Batch.Callback<R> callback)
    throws ServiceException, Throwable {
  // get regions covered by the row range
  List<byte[]> keys = getStartKeysInRange(startKey, endKey);
  Map<byte[], Future<R>> futures = new TreeMap<byte[], Future<R>>(Bytes.BYTES_COMPARATOR);
  for (final byte[] r : keys) {
    final RegionCoprocessorRpcChannel channel =
        new RegionCoprocessorRpcChannel(connection, tableName, r);
    Future<R> future = pool.submit(new Callable<R>() {
      public R call() throws Exception {
        T instance = ProtobufUtil.newServiceStub(service, channel);
        R result = callable.call(instance);
        byte[] region = channel.getLastRegion();
        if (callback != null) {
          callback.update(region, r, result);
        }
        return result;
      }
    });
    futures.put(r, future);
  }
  for (Map.Entry<byte[], Future<R>> e : futures.entrySet()) {
    try {
      e.getValue().get();
    } catch (ExecutionException ee) {
      LOG.warn("Error calling coprocessor service " + service.getName() + " for row "
          + Bytes.toStringBinary(e.getKey()), ee);
      throw ee.getCause();
    } catch (InterruptedException ie) {
      throw new InterruptedIOException("Interrupted calling coprocessor service "
          + service.getName() + " for row " + Bytes.toStringBinary(e.getKey())).initCause(ie);
    }
  }
}
From source file: org.apache.hadoop.hbase.client.crosssite.CrossSiteHBaseAdmin.java

/**
 * Disables the cross site table.
 *
 * @param tableName
 * @throws Exception
 */
private void disableTableInternal(final String tableName) throws Exception {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Start to disable the cross site table " + tableName);
  }
  for (int tries = 0; tries < this.numRetries * this.retryLongerMultiplier; ++tries) {
    // create an ephemeral node
    boolean locked = false;
    try {
      locked = znodes.lockTable(tableName);
      if (locked) {
        TableState tableState = znodes.getTableState(tableName);
        if (LOG.isDebugEnabled()) {
          LOG.debug("The state of " + tableName + " is " + tableState);
        }
        if (!TableState.ENABLED.equals(tableState)) {
          if (TableState.DISABLED.equals(tableState)) {
            throw new TableNotEnabledException(tableName);
          } else if (TableState.ENABLING.equals(tableState)
              || TableState.DISABLING.equals(tableState)) {
            LOG.info("Try to disable a cross site table " + tableName
                + " in the ENABLING/DISABLING state");
          } else {
            throw new TableAbnormalStateException(tableName + ":" + tableState);
          }
        }
        // call HBaseAdmin.disableTable
        Map<String, ClusterInfo> clusterInfo = znodes.listClusterInfos();
        // update the state to disabling
        znodes.setTableState(tableName, TableState.DISABLING);
        // access the clusters one by one
        List<Future<Void>> results = new ArrayList<Future<Void>>();
        for (final Entry<String, ClusterInfo> entry : clusterInfo.entrySet()) {
          results.add(pool.submit(new CrossSiteCallable<Void>(conf) {
            @Override
            public Void call() throws Exception {
              String clusterTableName =
                  CrossSiteUtil.getClusterTableName(tableName, entry.getKey());
              HBaseAdmin admin = createHBaseAmin(configuration, entry.getValue().getAddress());
              try {
                disableTable(admin, clusterTableName);
              } finally {
                try {
                  admin.close();
                } catch (IOException e) {
                  LOG.warn("Fail to close the HBaseAdmin", e);
                }
              }
              return null;
            }
          }));
        }
        try {
          for (Future<Void> result : results) {
            // directly throw the exception.
            result.get();
          }
          // update the znode state
          znodes.setTableState(tableName, TableState.DISABLED);
        } catch (Exception e) {
          LOG.error("Fail to disable the cross site table " + tableName, e);
          throw new IOException(e);
        }
        if (LOG.isDebugEnabled()) {
          LOG.debug("The cross site table " + tableName + " is disabled");
        }
        return;
      }
    } finally {
      if (locked) {
        znodes.unlockTable(tableName);
      }
    }
    if (tries < this.numRetries * this.retryLongerMultiplier - 1) {
      try {
        // Sleep
        Thread.sleep(getPauseTime(tries));
      } catch (InterruptedException e) {
        throw new InterruptedIOException("Interrupted when waiting"
            + " for cross site HTable disable");
      }
    }
  }
  throw new IOException("Retries exhausted, it took too long to wait" + " for the table "
      + tableName + " to be disabled.");
}
From source file: org.apache.hadoop.hbase.client.crosssite.CrossSiteHBaseAdmin.java

/**
 * Deletes a cross site table, and meanwhile deletes all of its replicated tables.
 *
 * @param tableName
 * @throws Exception
 */
private void deleteTableInternal(final String tableName) throws Exception {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Start to delete the cross site table " + tableName);
  }
  for (int tries = 0; tries < this.numRetries * this.retryLongerMultiplier; ++tries) {
    boolean locked = false;
    try {
      locked = znodes.lockTable(tableName);
      if (locked) {
        TableState tableState = znodes.getTableState(tableName);
        if (LOG.isDebugEnabled()) {
          LOG.debug("The state of " + tableName + " is " + tableState);
        }
        if (!TableState.DISABLED.equals(tableState)) {
          if (TableState.ENABLED.equals(tableState)) {
            throw new TableNotDisabledException(tableName);
          } else if (TableState.DELETING.equals(tableState)) {
            LOG.info("Try to delete the cross site table " + tableName + " in the DELETING state");
          } else {
            throw new TableAbnormalStateException(tableName + ":" + tableState);
          }
        }
        Map<String, ClusterInfo> clusters = znodes.listClusterInfos();
        // update the table state to deleting
        znodes.setTableState(tableName, TableState.DELETING);
        // access the clusters one by one
        List<Future<Void>> results = new ArrayList<Future<Void>>();
        for (final Entry<String, ClusterInfo> entry : clusters.entrySet()) {
          results.add(pool.submit(new CrossSiteCallable<Void>(conf) {
            @Override
            public Void call() throws Exception {
              String clusterTableName =
                  CrossSiteUtil.getClusterTableName(tableName, entry.getKey());
              HBaseAdmin admin = createHBaseAmin(configuration, entry.getValue().getAddress());
              try {
                if (admin.tableExists(clusterTableName)) {
                  if (!admin.isTableDisabled(clusterTableName)) {
                    admin.disableTable(clusterTableName);
                  }
                  admin.deleteTable(clusterTableName);
                }
              } finally {
                try {
                  admin.close();
                } catch (IOException e) {
                  LOG.warn("Fail to close the HBaseAdmin", e);
                }
              }
              // remove the tables in peers.
              ClusterInfo ci = entry.getValue();
              if (ci.getPeers() != null && !ci.getPeers().isEmpty()) {
                for (ClusterInfo peer : ci.getPeers()) {
                  LOG.info("Start to delete the table " + clusterTableName + " from the peer "
                      + peer.getAddress());
                  HBaseAdmin peerAdmin = createHBaseAmin(configuration, peer.getAddress());
                  try {
                    if (peerAdmin.tableExists(clusterTableName)) {
                      if (!peerAdmin.isTableDisabled(clusterTableName)) {
                        peerAdmin.disableTable(clusterTableName);
                      }
                      peerAdmin.deleteTable(clusterTableName);
                    }
                  } finally {
                    try {
                      peerAdmin.close();
                    } catch (IOException e) {
                      LOG.warn("Fail to close the HBaseAdmin of the peer " + peer.getAddress(), e);
                    }
                  }
                }
              }
              return null;
            }
          }));
        }
        try {
          for (Future<Void> result : results) {
            result.get();
          }
          // remove the znodes of the {tableName}.
          znodes.deleteTableZNode(tableName);
        } catch (Exception e) {
          LOG.error("Fail to delete the cross site table " + tableName, e);
          throw new IOException(e);
        }
        LOG.info("The znode of " + tableName + " is deleted");
        return;
      }
    } finally {
      if (locked) {
        znodes.unlockTable(tableName);
      }
    }
    if (tries < this.numRetries * this.retryLongerMultiplier - 1) {
      try {
        // Sleep
        Thread.sleep(getPauseTime(tries));
      } catch (InterruptedException e) {
        throw new InterruptedIOException("Interrupted when waiting"
            + " for cross site table delete");
      }
    }
  }
  // throw an exception
  throw new IOException("Retries exhausted while still waiting for table: " + tableName
      + " to be deleted");
}
From source file: org.apache.hadoop.hbase.client.crosssite.CrossSiteHBaseAdmin.java

/**
 * Creates a cross site table. If the table needs to be replicated to the peers, the table will
 * be created in the peers as well.
 *
 * @param desc
 * @param splitKeys
 * @param locator
 * @param createAgainIfAlreadyExists
 * @throws Exception
 */
private void createTableInternal(final HTableDescriptor desc, final byte[][] splitKeys,
    final ClusterLocator locator, boolean createAgainIfAlreadyExists) throws Exception {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Start to create the cross site table " + desc.getNameAsString());
  }
  final String tableName = desc.getNameAsString();
  for (int tries = 0; tries < this.numRetries * this.retryLongerMultiplier; ++tries) {
    if (znodes.isTableStateExist(tableName)) {
      throw new TableExistsException(tableName);
    }
    boolean locked = false;
    try {
      locked = znodes.lockTable(tableName);
      if (locked) {
        if (znodes.isTableStateExist(tableName)) {
          throw new TableExistsException(tableName);
        }
        final boolean createTableInPeers = isReplicatedTable(desc);
        Map<String, ClusterInfo> clusters = znodes.listClusterInfos();
        znodes.createTableZNode(tableName);
        // access the clusters one by one
        List<Future<Void>> results = new ArrayList<Future<Void>>();
        createTableOnClusters(desc, splitKeys, locator, tableName, createTableInPeers, clusters,
            results, createAgainIfAlreadyExists);
        try {
          for (Future<Void> result : results) {
            result.get();
          }
          LOG.info("The cross site table " + desc.getNameAsString() + " is created");
          // add the znodes to the {tableName}.
          addTableChildrenZNodes(desc, splitKeys, locator);
          return;
        } catch (Exception e) {
          LOG.error("Fail to create the cross site table:" + tableName, e);
          // remove the table znode
          // leave all the created HTables
          znodes.deleteTableZNode(tableName);
          throw new IOException(e);
        }
      }
    } finally {
      if (locked) {
        znodes.unlockTable(tableName);
      }
    }
    if (tries < this.numRetries * this.retryLongerMultiplier - 1) {
      try {
        // Sleep
        Thread.sleep(getPauseTime(tries));
      } catch (InterruptedException e) {
        throw new InterruptedIOException("Interrupted when waiting"
            + " for cross site table creation");
      }
    }
  }
  // throw an exception
  throw new IOException("Retries exhausted while still waiting for table: " + tableName
      + " to be created");
}
From source file: org.apache.hadoop.hbase.client.HBaseAdmin.java

/**
 * Check to see if HBase is running. Throw an exception if not.
 * We consider that HBase is running if ZooKeeper and Master are running.
 *
 * @param conf system configuration
 * @throws MasterNotRunningException if the master is not running
 * @throws ZooKeeperConnectionException if unable to connect to zookeeper
 */
public static void checkHBaseAvailable(Configuration conf)
    throws MasterNotRunningException, ZooKeeperConnectionException, ServiceException, IOException {
  Configuration copyOfConf = HBaseConfiguration.create(conf);
  // We set it to make it fail as soon as possible if HBase is not available
  copyOfConf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
  copyOfConf.setInt("zookeeper.recovery.retry", 0);
  ConnectionManager.HConnectionImplementation connection =
      (ConnectionManager.HConnectionImplementation) HConnectionManager.getConnection(copyOfConf);
  try {
    // Check ZK first.
    // If the connection exists, we may have a connection to ZK that does
    // not work anymore
    ZooKeeperKeepAliveConnection zkw = null;
    try {
      zkw = connection.getKeepAliveZooKeeperWatcher();
      zkw.getRecoverableZooKeeper().getZooKeeper().exists(zkw.baseZNode, false);
    } catch (IOException e) {
      throw new ZooKeeperConnectionException("Can't connect to ZooKeeper", e);
    } catch (InterruptedException e) {
      throw (InterruptedIOException) new InterruptedIOException("Can't connect to ZooKeeper")
          .initCause(e);
    } catch (KeeperException e) {
      throw new ZooKeeperConnectionException("Can't connect to ZooKeeper", e);
    } finally {
      if (zkw != null) {
        zkw.close();
      }
    }
    // Check Master
    connection.isMasterRunning();
  } finally {
    connection.close();
  }
}
From source file: org.apache.hadoop.hbase.client.crosssite.CrossSiteHBaseAdmin.java

/**
 * Modifies the column.
 *
 * @param tableName
 * @param hcd
 * @throws IOException
 * @throws KeeperException
 */
private void modifyColumnInternal(final String tableName, final HColumnDescriptor hcd)
    throws IOException, KeeperException {
  int tries = 0;
  for (; tries < numRetries * retryLongerMultiplier; ++tries) {
    if (znodes.lockTable(tableName)) {
      try {
        final HTableDescriptor htd = getTableDescriptor(tableName);
        final HTableDescriptor oldHtd = new HTableDescriptor(htd);
        final HColumnDescriptor oldHcd = htd.getFamily(hcd.getName());
        if (oldHcd == null) {
          throw new InvalidFamilyOperationException("Family '" + hcd.getNameAsString()
              + "' doesn't exist so cannot be modified");
        }
        htd.addFamily(hcd);
        if (LOG.isDebugEnabled()) {
          LOG.debug("Modifying the column of the cross site table : " + tableName
              + " column : " + hcd);
        }
        TableState tableState = znodes.getTableState(tableName);
        if (LOG.isDebugEnabled()) {
          LOG.debug("The state of " + tableName + " is " + tableState);
        }
        if (!TableState.DISABLED.equals(tableState)) {
          if (TableState.ENABLED.equals(tableState)) {
            throw new TableNotDisabledException(tableName);
          } else if (TableState.MODIFYINGCOLUMN.equals(tableState)) {
            if (!htd.equals(znodes.getProposedTableDesc(tableName))) {
              throw new TableAbnormalStateException(
                  "A previous incomplete modifyColumn request with different HColumnDescriptor"
                      + " details! Please pass same details");
            }
            LOG.info("Try to modify a column for the cross site table " + tableName
                + " in the MODIFYINGCOLUMN state");
          } else {
            throw new TableAbnormalStateException(tableName + ":" + tableState);
          }
        }
        Map<String, ClusterInfo> clusters = znodes.listClusterInfos();
        znodes.writeProposedTableDesc(tableName, htd);
        // update the table state to MODIFYINGCOLUMN
        znodes.setTableState(tableName, TableState.MODIFYINGCOLUMN);
        // access the clusters one by one
        List<Future<Void>> results = new ArrayList<Future<Void>>();
        for (final Entry<String, ClusterInfo> entry : clusters.entrySet()) {
          final String clusterName = entry.getKey();
          results.add(pool.submit(new CrossSiteCallable<Void>(conf) {
            @Override
            public Void call() throws Exception {
              ClusterInfo ci = entry.getValue();
              String clusterTableName = CrossSiteUtil.getClusterTableName(tableName, clusterName);
              if (LOG.isDebugEnabled()) {
                LOG.debug("Modifying the column " + hcd.getNameAsString() + " of the table "
                    + clusterTableName + " in cluster " + clusterName);
              }
              HBaseAdmin admin = createHBaseAmin(configuration, ci.getAddress());
              try {
                admin.modifyColumn(clusterTableName, hcd);
              } finally {
                try {
                  admin.close();
                } catch (IOException e) {
                  LOG.warn("Fail to close the HBaseAdmin", e);
                }
              }
              // When this column was not replication enabled but the scope is being changed now,
              // the column needs to be added to the peer table as well.
              // When this column was replication enabled but the scope is being changed to 0 now,
              // there is no need to delete the column. Let it stay in the peer with the old data.
              if (hcd.getScope() > 0 && ci.getPeers() != null) {
                HColumnDescriptor peerHcd = new HColumnDescriptor(hcd);
                peerHcd.setScope(0);
                boolean tableAlreadyReplicated = isReplicatedTable(oldHtd);
                for (ClusterInfo peer : ci.getPeers()) {
                  if (oldHcd.getScope() == 0) {
                    if (tableAlreadyReplicated) {
                      addColumn(configuration, clusterName, peer.getAddress(), tableName,
                          peerHcd, true);
                    } else {
                      String peerTableName = CrossSiteUtil.getPeerClusterTableName(tableName,
                          clusterName, peer.getName());
                      HBaseAdmin peerAdmin = createHBaseAmin(configuration, peer.getAddress());
                      try {
                        if (peerAdmin.tableExists(peerTableName)) {
                          addColumn(peerAdmin, clusterName, peer.getAddress(), tableName,
                              peerHcd, true);
                        } else {
                          // create the table in the peer.
                          byte[][] splitKeys = getTableSplitsForCluster(tableName, clusterName);
                          HTableDescriptor peerHtd = new HTableDescriptor(htd);
                          peerHtd.setName(Bytes.toBytes(peerTableName));
                          for (HColumnDescriptor column : peerHtd.getColumnFamilies()) {
                            // only create the CFs that have the scope as 1.
                            if (column.getScope() > 0) {
                              column.setScope(0);
                            } else {
                              peerHtd.removeFamily(column.getName());
                            }
                          }
                          if (LOG.isDebugEnabled()) {
                            LOG.debug("Creating table " + peerTableName + " in peer cluster "
                                + peer);
                          }
                          peerAdmin.createTable(peerHtd, splitKeys);
                        }
                      } finally {
                        try {
                          peerAdmin.close();
                        } catch (IOException e) {
                          LOG.warn("Fail to close the HBaseAdmin", e);
                        }
                      }
                    }
                  } else {
                    modifyColumnInPeer(configuration, clusterName, tableName, peer, peerHcd);
                  }
                }
              }
              return null;
            }
          }));
        }
        try {
          for (Future<Void> result : results) {
            result.get();
          }
          // modify the znodes of the {tableName}.
          znodes.modifyTableDesc(tableName, htd);
          znodes.setTableState(tableName, TableState.DISABLED);
          znodes.deleteProposedTableDesc(tableName);
        } catch (Exception e) {
          LOG.error("Fail to modify the column of the table " + tableName, e);
          throw new IOException(e);
        }
        if (LOG.isDebugEnabled()) {
          LOG.debug("The column of the cross site table " + tableName + " is modified");
        }
        return;
      } finally {
        znodes.unlockTable(tableName);
      }
    }
    if (tries < numRetries * retryLongerMultiplier - 1) {
      try {
        // Sleep
        Thread.sleep(getPauseTime(tries));
      } catch (InterruptedException e) {
        throw new InterruptedIOException("Interrupted when waiting"
            + " for cross site HTable enable");
      }
    }
  }
  // All retries for acquiring locks failed! Throwing Exception
  throw new RetriesExhaustedException("Not able to acquire table lock after " + tries + " tries");
}
From source file: org.apache.hadoop.hbase.client.HBaseAdmin.java

/**
 * Take a snapshot and wait for the server to complete that snapshot (blocking).
 * <p>
 * Only a single snapshot should be taken at a time for an instance of HBase, or results may be
 * undefined (you can tell multiple HBase clusters to snapshot at the same time, but only one at
 * a time for a single cluster).
 * <p>
 * Snapshots are considered unique based on <b>the name of the snapshot</b>. Attempts to take a
 * snapshot with the same name (even a different type or with different parameters) will fail
 * with a {@link SnapshotCreationException} indicating the duplicate naming.
 * <p>
 * Snapshot names follow the same naming constraints as tables in HBase. See
 * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
 * <p>
 * You should probably use {@link #snapshot(String, String)} or {@link #snapshot(byte[], byte[])}
 * unless you are sure about the type of snapshot that you want to take.
 * @param snapshot snapshot to take
 * @throws IOException or we lose contact with the master.
 * @throws SnapshotCreationException if snapshot failed to be taken
 * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
 */
public void snapshot(SnapshotDescription snapshot)
    throws IOException, SnapshotCreationException, IllegalArgumentException {
  // actually take the snapshot
  SnapshotResponse response = takeSnapshotAsync(snapshot);
  final IsSnapshotDoneRequest request =
      IsSnapshotDoneRequest.newBuilder().setSnapshot(snapshot).build();
  IsSnapshotDoneResponse done = null;
  long start = EnvironmentEdgeManager.currentTimeMillis();
  long max = response.getExpectedTimeout();
  long maxPauseTime = max / this.numRetries;
  int tries = 0;
  LOG.debug("Waiting a max of " + max + " ms for snapshot '"
      + ClientSnapshotDescriptionUtils.toString(snapshot) + "' to complete. (max "
      + maxPauseTime + " ms per retry)");
  while (tries == 0
      || ((EnvironmentEdgeManager.currentTimeMillis() - start) < max && !done.getDone())) {
    try {
      // sleep a backoff <= pauseTime amount
      long sleep = getPauseTime(tries++);
      sleep = sleep > maxPauseTime ? maxPauseTime : sleep;
      LOG.debug("(#" + tries + ") Sleeping: " + sleep
          + "ms while waiting for snapshot completion.");
      Thread.sleep(sleep);
    } catch (InterruptedException e) {
      throw (InterruptedIOException) new InterruptedIOException("Interrupted").initCause(e);
    }
    LOG.debug("Getting current status of snapshot from master...");
    done = executeCallable(new MasterCallable<IsSnapshotDoneResponse>(getConnection()) {
      @Override
      public IsSnapshotDoneResponse call(int callTimeout) throws ServiceException {
        return master.isSnapshotDone(null, request);
      }
    });
  }
  if (!done.getDone()) {
    throw new SnapshotCreationException("Snapshot '" + snapshot.getName()
        + "' wasn't completed in expectedTime:" + max + " ms", snapshot);
  }
}
From source file: org.apache.hadoop.hbase.client.HBaseAdmin.java

/**
 * Restore the specified snapshot on the original table. (The table must be disabled.)
 * If 'takeFailSafeSnapshot' is set to true, a snapshot of the current table is taken
 * before executing the restore operation.
 * In case of restore failure, the failsafe snapshot will be restored.
 * If the restore completes without problem the failsafe snapshot is deleted.
 *
 * The failsafe snapshot name is configurable by using the property
 * "hbase.snapshot.restore.failsafe.name".
 *
 * @param snapshotName name of the snapshot to restore
 * @param takeFailSafeSnapshot true if the failsafe snapshot should be taken
 * @throws IOException if a remote or network exception occurs
 * @throws RestoreSnapshotException if snapshot failed to be restored
 * @throws IllegalArgumentException if the restore request is formatted incorrectly
 */
public void restoreSnapshot(final String snapshotName, boolean takeFailSafeSnapshot)
    throws IOException, RestoreSnapshotException {
  TableName tableName = null;
  for (SnapshotDescription snapshotInfo : listSnapshots()) {
    if (snapshotInfo.getName().equals(snapshotName)) {
      tableName = TableName.valueOf(snapshotInfo.getTable());
      break;
    }
  }
  if (tableName == null) {
    throw new RestoreSnapshotException("Unable to find the table name for snapshot="
        + snapshotName);
  }
  // The table does not exist, switch to clone.
  if (!tableExists(tableName)) {
    try {
      cloneSnapshot(snapshotName, tableName);
    } catch (InterruptedException e) {
      throw new InterruptedIOException("Interrupted when restoring a nonexistent table: "
          + e.getMessage());
    }
    return;
  }
  // Check if the table is disabled
  if (!isTableDisabled(tableName)) {
    throw new TableNotDisabledException(tableName);
  }
  // Take a snapshot of the current state
  String failSafeSnapshotSnapshotName = null;
  if (takeFailSafeSnapshot) {
    failSafeSnapshotSnapshotName = conf.get("hbase.snapshot.restore.failsafe.name",
        "hbase-failsafe-{snapshot.name}-{restore.timestamp}");
    failSafeSnapshotSnapshotName = failSafeSnapshotSnapshotName
        .replace("{snapshot.name}", snapshotName)
        .replace("{table.name}", tableName.toString().replace(TableName.NAMESPACE_DELIM, '.'))
        .replace("{restore.timestamp}",
            String.valueOf(EnvironmentEdgeManager.currentTimeMillis()));
    LOG.info("Taking restore-failsafe snapshot: " + failSafeSnapshotSnapshotName);
    snapshot(failSafeSnapshotSnapshotName, tableName);
  }
  try {
    // Restore snapshot
    internalRestoreSnapshot(snapshotName, tableName);
  } catch (IOException e) {
    // Something went wrong during the restore...
    // if the pre-restore snapshot is available try to rollback
    if (takeFailSafeSnapshot) {
      try {
        internalRestoreSnapshot(failSafeSnapshotSnapshotName, tableName);
        String msg = "Restore snapshot=" + snapshotName + " failed. Rollback to snapshot="
            + failSafeSnapshotSnapshotName + " succeeded.";
        LOG.error(msg, e);
        throw new RestoreSnapshotException(msg, e);
      } catch (IOException ex) {
        String msg = "Failed to restore and rollback to snapshot="
            + failSafeSnapshotSnapshotName;
        LOG.error(msg, ex);
        throw new RestoreSnapshotException(msg, e);
      }
    } else {
      throw new RestoreSnapshotException("Failed to restore snapshot=" + snapshotName, e);
    }
  }
  // If the restore succeeded, delete the pre-restore snapshot
  if (takeFailSafeSnapshot) {
    try {
      LOG.info("Deleting restore-failsafe snapshot: " + failSafeSnapshotSnapshotName);
      deleteSnapshot(failSafeSnapshotSnapshotName);
    } catch (IOException e) {
      LOG.error("Unable to remove the failsafe snapshot: " + failSafeSnapshotSnapshotName, e);
    }
  }
}
From source file: org.apache.hadoop.hbase.client.crosssite.CrossSiteHBaseAdmin.java

/**
 * Deletes the column.
 *
 * @param tableName
 * @param columnName
 * @throws IOException
 * @throws KeeperException
 */
private void deleteColumnInternal(final String tableName, final byte[] columnName)
    throws IOException, KeeperException {
  int tries = 0;
  for (; tries < numRetries * retryLongerMultiplier; ++tries) {
    if (znodes.lockTable(tableName)) {
      try {
        HTableDescriptor htd = getTableDescriptor(tableName);
        if (!htd.hasFamily(columnName)) {
          throw new InvalidFamilyOperationException("Column family '"
              + Bytes.toString(columnName) + "' does not exist");
        }
        final HColumnDescriptor removedHcd = htd.removeFamily(columnName);
        if (LOG.isDebugEnabled()) {
          LOG.debug("Deleting column from the cross site table " + tableName);
        }
        TableState tableState = znodes.getTableState(tableName);
        if (LOG.isDebugEnabled()) {
          LOG.debug("The state of " + tableName + " is " + tableState);
        }
        if (!TableState.DISABLED.equals(tableState)) {
          if (TableState.ENABLED.equals(tableState)) {
            throw new TableNotDisabledException(tableName);
          } else if (TableState.DELETINGCOLUMN.equals(tableState)) {
            if (!htd.equals(znodes.getProposedTableDesc(tableName))) {
              throw new TableAbnormalStateException(
                  "A previous incomplete deleteColumn request with different HColumnDescriptor"
                      + " details! Please pass same details");
            }
            LOG.info("Try to delete a column for the cross site table " + tableName
                + " in the DELETINGCOLUMN state");
          } else {
            throw new TableAbnormalStateException(tableName + ":" + tableState);
          }
        }
        Map<String, ClusterInfo> clusters = znodes.listClusterInfos();
        znodes.writeProposedTableDesc(tableName, htd);
        // update the table state to DELETINGCOLUMN
        znodes.setTableState(tableName, TableState.DELETINGCOLUMN);
        // access the cluster one by one
        List<Future<Void>> results = new ArrayList<Future<Void>>();
        for (final Entry<String, ClusterInfo> entry : clusters.entrySet()) {
          results.add(pool.submit(new CrossSiteCallable<Void>(conf) {
            @Override
            public Void call() throws Exception {
              ClusterInfo ci = entry.getValue();
              String masterClusterName = entry.getKey();
              String clusterTableName =
                  CrossSiteUtil.getClusterTableName(tableName, masterClusterName);
              deleteColumn(configuration, ci.getAddress(), clusterTableName, columnName, false);
              if (removedHcd.getScope() > 0) {
                // This removed column was replication enabled! So deleting from peer tables as
                // well
                Set<ClusterInfo> peers = ci.getPeers();
                if (peers != null) {
                  for (ClusterInfo peer : peers) {
                    String peerTableName = CrossSiteUtil.getPeerClusterTableName(tableName,
                        masterClusterName, peer.getName());
                    if (LOG.isDebugEnabled()) {
                      LOG.debug("Deleting column " + Bytes.toString(columnName) + " from table "
                          + peerTableName + " in peer cluster " + peer);
                    }
                    deleteColumn(configuration, peer.getAddress(), peerTableName, columnName,
                        true);
                  }
                }
              }
              return null;
            }
          }));
        }
        try {
          for (Future<Void> result : results) {
            result.get();
          }
          // modify the znodes to the {tableName}.
          znodes.modifyTableDesc(tableName, htd);
          znodes.setTableState(tableName, TableState.DISABLED);
          znodes.deleteProposedTableDesc(tableName);
        } catch (Exception e) {
          LOG.error("Fail to delete the column from the table " + tableName, e);
          throw new IOException(e);
        }
        if (LOG.isDebugEnabled()) {
          LOG.debug("The column of the cross site table " + tableName + " is deleted");
        }
        return;
      } finally {
        znodes.unlockTable(tableName);
      }
    }
    if (tries < numRetries * retryLongerMultiplier - 1) {
      try {
        // Sleep
        Thread.sleep(getPauseTime(tries));
      } catch (InterruptedException e) {
        throw new InterruptedIOException("Interrupted when waiting"
            + " for cross site HTable enable");
      }
    }
  }
}
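Several of the CrossSiteHBaseAdmin examples above share the same retry skeleton: attempt an operation under a lock, sleep between attempts, and convert an interrupted sleep into an InterruptedIOException. Here is a condensed sketch of that skeleton, with hypothetical names and retry settings (not the actual CrossSiteHBaseAdmin API):

import java.io.IOException;
import java.io.InterruptedIOException;

public class RetryLoopSketch {
  private static final int NUM_RETRIES = 10;     // hypothetical retry budget
  private static final long BASE_PAUSE_MS = 100; // hypothetical base pause

  // Runs an idempotent action until it succeeds or retries are exhausted,
  // surfacing an interrupted backoff sleep as an InterruptedIOException.
  public void runWithRetries(Runnable action) throws IOException {
    for (int tries = 0; tries < NUM_RETRIES; ++tries) {
      try {
        action.run();
        return;
      } catch (RuntimeException e) {
        // fall through to the pause and retry
      }
      if (tries < NUM_RETRIES - 1) {
        try {
          Thread.sleep(BASE_PAUSE_MS * (tries + 1)); // linear backoff
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          throw new InterruptedIOException("Interrupted while waiting to retry");
        }
      }
    }
    throw new IOException("Retries exhausted");
  }
}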