List of usage examples for java.io.InterruptedIOException

public InterruptedIOException(String s)

Constructs an InterruptedIOException with the specified detail message.

From source file: com.mellanox.r4h.DFSOutputStream.java
private void waitForAckedSeqno(long seqno) throws IOException {
    if (DFSOutputStream.LOG.isDebugEnabled()) {
        DFSOutputStream.LOG.debug("Waiting for ack for: " + seqno);
    }
    try {
        while (!closed) {
            checkClosed();
            if (lastAckedSeqno >= seqno) {
                if (LOG.isTraceEnabled()) {
                    LOG.trace("going to break loop for waiting to lastAckedSeqno(=" + lastAckedSeqno
                            + ", seqnumToWaitFor=" + seqno + ")");
                }
                break;
            }
            try {
                if (LOG.isTraceEnabled()) {
                    LOG.trace("going to wait on ackQueue.wait() for lastAckedSeqno(=" + lastAckedSeqno
                            + ", seqnumToWaitFor=" + seqno + ")");
                }
                synchronized (ackQueue) { // in orig cdh wait on dataQ
                    if (lastAckedSeqno < seqno) {
                        ackQueue.wait();
                    }
                }
                if (LOG.isTraceEnabled()) {
                    LOG.trace("After waiting on ackQueue.wait() for lastAckedSeqno(=" + lastAckedSeqno
                            + ", seqnumToWaitFor=" + seqno + ")");
                }
            } catch (InterruptedException ie) {
                throw new InterruptedIOException(
                        "Interrupted while waiting for data to be acknowledged by pipeline");
            }
        }
        checkClosed();
    } catch (ClosedChannelException e) {
    }
}
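The DFSOutputStream example shows the basic pattern for this constructor: a blocking wait loop catches InterruptedException and rethrows it as an InterruptedIOException built from a detail message, so callers only need to handle IOException. A minimal self-contained sketch of that pattern (AckWaiter and its members are illustrative names, not taken from the source above):

import java.io.IOException;
import java.io.InterruptedIOException;

public class AckWaiter {
    private final Object lock = new Object();
    private long lastAcked = -1;

    // Blocks until ack(seqno) has been called for this sequence number or a
    // later one; converts thread interruption into an InterruptedIOException
    // so callers deal only with IOException.
    public void awaitAck(long seqno) throws IOException {
        synchronized (lock) {
            while (lastAcked < seqno) {
                try {
                    lock.wait();
                } catch (InterruptedException ie) {
                    Thread.currentThread().interrupt(); // preserve the interrupt status
                    throw new InterruptedIOException("Interrupted while waiting for ack " + seqno);
                }
            }
        }
    }

    public void ack(long seqno) {
        synchronized (lock) {
            lastAcked = Math.max(lastAcked, seqno);
            lock.notifyAll();
        }
    }
}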
From source file: org.apache.hadoop.hbase.client.HBaseAdmin.java
/**
 * Execute a distributed procedure on a cluster.
 *
 * @param signature A distributed procedure is uniquely identified
 *   by its signature (default the root ZK node name of the procedure).
 * @param instance The instance name of the procedure. For some procedures, this parameter is
 *   optional.
 * @param props Property/Value pairs of properties passing to the procedure
 */
public void execProcedure(String signature, String instance, Map<String, String> props) throws IOException {
    ProcedureDescription.Builder builder = ProcedureDescription.newBuilder();
    builder.setSignature(signature).setInstance(instance);
    for (Entry<String, String> entry : props.entrySet()) {
        NameStringPair pair = NameStringPair.newBuilder().setName(entry.getKey()).setValue(entry.getValue())
                .build();
        builder.addConfiguration(pair);
    }
    final ExecProcedureRequest request = ExecProcedureRequest.newBuilder().setProcedure(builder.build())
            .build();
    // run the procedure on the master
    ExecProcedureResponse response = executeCallable(
            new MasterCallable<ExecProcedureResponse>(getConnection()) {
                @Override
                public ExecProcedureResponse call(int callTimeout) throws ServiceException {
                    return master.execProcedure(null, request);
                }
            });

    long start = EnvironmentEdgeManager.currentTimeMillis();
    long max = response.getExpectedTimeout();
    long maxPauseTime = max / this.numRetries;
    int tries = 0;
    LOG.debug("Waiting a max of " + max + " ms for procedure '" + signature + " : " + instance
            + "'' to complete. (max " + maxPauseTime + " ms per retry)");
    boolean done = false;
    while (tries == 0 || ((EnvironmentEdgeManager.currentTimeMillis() - start) < max && !done)) {
        try {
            // sleep a backoff <= pauseTime amount
            long sleep = getPauseTime(tries++);
            sleep = sleep > maxPauseTime ? maxPauseTime : sleep;
            LOG.debug("(#" + tries + ") Sleeping: " + sleep + "ms while waiting for procedure completion.");
            Thread.sleep(sleep);
        } catch (InterruptedException e) {
            throw (InterruptedIOException) new InterruptedIOException("Interrupted").initCause(e);
        }
        LOG.debug("Getting current status of procedure from master...");
        done = isProcedureFinished(signature, instance, props);
    }
    if (!done) {
        throw new IOException("Procedure '" + signature + " : " + instance
                + "' wasn't completed in expectedTime:" + max + " ms");
    }
}
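This example uses an idiom worth calling out: InterruptedIOException has no (String, Throwable) constructor, so the original InterruptedException is attached with initCause, and because Throwable.initCause returns Throwable, a cast back to InterruptedIOException is needed before throwing. A small helper capturing the idiom (the Interrupts class name is hypothetical, not part of any library):

import java.io.InterruptedIOException;

public final class Interrupts {
    private Interrupts() {}

    // Wraps an InterruptedException as an InterruptedIOException, preserving
    // the cause. The cast is required because Throwable.initCause returns Throwable.
    public static InterruptedIOException asIoInterrupt(String msg, InterruptedException e) {
        return (InterruptedIOException) new InterruptedIOException(msg).initCause(e);
    }
}

With such a helper, the catch block above would reduce to throw Interrupts.asIoInterrupt("Interrupted", e);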
From source file: org.apache.hadoop.hbase.client.HBaseAdmin.java
/**
 * Execute Restore/Clone snapshot and wait for the server to complete (blocking).
 * To check if the cloned table exists, use {@link #isTableAvailable} -- it is not safe to
 * create an HTable instance to this table before it is available.
 * @param snapshotName snapshot to restore
 * @param tableName table name to restore the snapshot on
 * @throws IOException if a remote or network exception occurs
 * @throws RestoreSnapshotException if snapshot failed to be restored
 * @throws IllegalArgumentException if the restore request is formatted incorrectly
 */
private void internalRestoreSnapshot(final String snapshotName, final TableName tableName)
        throws IOException, RestoreSnapshotException {
    SnapshotDescription snapshot = SnapshotDescription.newBuilder().setName(snapshotName)
            .setTable(tableName.getNameAsString()).build();

    // actually restore the snapshot
    internalRestoreSnapshotAsync(snapshot);

    final IsRestoreSnapshotDoneRequest request = IsRestoreSnapshotDoneRequest.newBuilder()
            .setSnapshot(snapshot).build();
    IsRestoreSnapshotDoneResponse done = IsRestoreSnapshotDoneResponse.newBuilder().setDone(false)
            .buildPartial();
    final long maxPauseTime = 5000;
    int tries = 0;
    while (!done.getDone()) {
        try {
            // sleep a backoff <= pauseTime amount
            long sleep = getPauseTime(tries++);
            sleep = sleep > maxPauseTime ? maxPauseTime : sleep;
            LOG.debug(tries + ") Sleeping: " + sleep + " ms while we wait for snapshot restore to complete.");
            Thread.sleep(sleep);
        } catch (InterruptedException e) {
            throw (InterruptedIOException) new InterruptedIOException("Interrupted").initCause(e);
        }
        LOG.debug("Getting current status of snapshot restore from master...");
        done = executeCallable(new MasterCallable<IsRestoreSnapshotDoneResponse>(getConnection()) {
            @Override
            public IsRestoreSnapshotDoneResponse call(int callTimeout) throws ServiceException {
                return master.isRestoreSnapshotDone(null, request);
            }
        });
    }
    if (!done.getDone()) {
        throw new RestoreSnapshotException("Snapshot '" + snapshot.getName() + "' wasn't restored.");
    }
}
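Both HBaseAdmin examples wrap the same poll-with-capped-backoff skeleton around different status checks. A stripped-down, runnable sketch of that skeleton (isDone, the backoff formula, and all names below are stand-ins assumed for illustration, not the HBase internals shown above):

import java.io.IOException;
import java.io.InterruptedIOException;

public class PollUntilDone {
    // Polls isDone() with a bounded backoff until done or the deadline passes;
    // an interrupt during the sleep surfaces as an InterruptedIOException
    // carrying the original InterruptedException as its cause.
    static void waitForCompletion(long maxMillis, long maxPauseMillis) throws IOException {
        long start = System.currentTimeMillis();
        int tries = 0;
        boolean done = false;
        while (tries == 0 || (System.currentTimeMillis() - start < maxMillis && !done)) {
            try {
                // stand-in exponential backoff, capped at maxPauseMillis
                long sleep = Math.min((long) Math.pow(2, tries++) * 100, maxPauseMillis);
                Thread.sleep(sleep);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw (InterruptedIOException) new InterruptedIOException("Interrupted").initCause(e);
            }
            done = isDone(); // stand-in for isProcedureFinished / isRestoreSnapshotDone
        }
        if (!done) {
            throw new IOException("Operation wasn't completed in " + maxMillis + " ms");
        }
    }

    static boolean isDone() {
        return true; // placeholder so the sketch compiles and runs
    }

    public static void main(String[] args) throws IOException {
        waitForCompletion(5000, 1000);
    }
}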
From source file: org.apache.hadoop.hbase.client.crosssite.CrossSiteHBaseAdmin.java
/**
 * Adds a column.
 *
 * @param tableName
 * @param hcd
 * @throws IOException
 * @throws KeeperException
 */
private void addColumnInternal(final String tableName, final HColumnDescriptor hcd)
        throws IOException, KeeperException {
    int tries = 0;
    for (; tries < numRetries * retryLongerMultiplier; ++tries) {
        if (znodes.lockTable(tableName)) {
            try {
                final HTableDescriptor htd = getTableDescriptor(tableName);
                if (htd.hasFamily(hcd.getName())) {
                    throw new InvalidFamilyOperationException(
                            "Family '" + hcd.getNameAsString() + "' already exists so cannot be added");
                }
                final boolean tableAlreadyReplicated = isReplicatedTable(htd);
                htd.addFamily(hcd);
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Adding column " + hcd + " to the cross site table " + tableName);
                }
                TableState tableState = znodes.getTableState(tableName);
                if (LOG.isDebugEnabled()) {
                    LOG.debug("The state of " + tableName + " is " + tableState);
                }
                if (!TableState.DISABLED.equals(tableState)) {
                    if (TableState.ENABLED.equals(tableState)) {
                        throw new TableNotDisabledException(tableName);
                    } else if (TableState.ADDINGCOLUMN.equals(tableState)) {
                        if (!htd.equals(znodes.getProposedTableDesc(tableName))) {
                            throw new TableAbnormalStateException(
                                    "A previous incomplete addColumn request with different HColumnDescriptor"
                                            + " details! Please pass same details");
                        }
                        LOG.info("Try to add a column for the cross site table " + tableName
                                + " in the ADDINGCOLUMN state");
                    } else {
                        throw new TableAbnormalStateException(tableName + ":" + tableState);
                    }
                }
                Map<String, ClusterInfo> clusters = znodes.listClusterInfos();
                znodes.writeProposedTableDesc(tableName, htd);
                // update the state to ADDINGCOLUMN
                znodes.setTableState(tableName, TableState.ADDINGCOLUMN);
                // access the cluster one by one
                List<Future<Void>> results = new ArrayList<Future<Void>>();
                for (final Entry<String, ClusterInfo> entry : clusters.entrySet()) {
                    results.add(pool.submit(new CrossSiteCallable<Void>(conf) {
                        @Override
                        public Void call() throws Exception {
                            ClusterInfo ci = entry.getValue();
                            String mainClusterName = entry.getKey();
                            addColumn(configuration, mainClusterName, ci.getAddress(), tableName, hcd, false);
                            // creates the table in peers.
                            if (hcd.getScope() > 0) {
                                HColumnDescriptor peerHCD = new HColumnDescriptor(hcd);
                                peerHCD.setScope(0);
                                if (ci.getPeers() != null && !ci.getPeers().isEmpty()) {
                                    for (ClusterInfo peer : ci.getPeers()) {
                                        String peerTableName = CrossSiteUtil.getPeerClusterTableName(tableName,
                                                mainClusterName, peer.getName());
                                        if (tableAlreadyReplicated) {
                                            // Already this table is present in the peer. Just need to add
                                            // the new column to the table
                                            if (LOG.isDebugEnabled()) {
                                                LOG.debug("Adding column " + hcd + " to table " + peerTableName
                                                        + " in the peer " + peer);
                                            }
                                            addColumn(configuration, mainClusterName, peer.getAddress(),
                                                    tableName, peerHCD, true);
                                        } else {
                                            HBaseAdmin peerAdmin = createHBaseAmin(configuration,
                                                    peer.getAddress());
                                            try {
                                                if (peerAdmin.tableExists(peerTableName)) {
                                                    // Already this table is present in the peer. Just need
                                                    // to add the new column to the table
                                                    if (LOG.isDebugEnabled()) {
                                                        LOG.debug("Adding column " + hcd + " to table "
                                                                + peerTableName + " in the peer " + peer);
                                                    }
                                                    addColumn(configuration, mainClusterName,
                                                            peer.getAddress(), tableName, peerHCD, true);
                                                } else {
                                                    // Till now there were no cfs in the table for
                                                    // replication. So tables are not yet there in peer.
                                                    // Create it with just one column (ie. this column)
                                                    if (LOG.isDebugEnabled()) {
                                                        LOG.debug("Creating table " + peerTableName
                                                                + " in peer cluster " + peer
                                                                + " as newly added column " + hcd
                                                                + " is replicatable");
                                                    }
                                                    byte[][] splitKeys = getTableSplitsForCluster(tableName,
                                                            entry.getKey());
                                                    HTableDescriptor peerHtd = new HTableDescriptor(htd);
                                                    for (HColumnDescriptor hcd : peerHtd.getColumnFamilies()) {
                                                        // only create the CFs that have the scope as 1.
                                                        if (hcd.getScope() > 0) {
                                                            hcd.setScope(0);
                                                        } else {
                                                            peerHtd.removeFamily(hcd.getName());
                                                        }
                                                    }
                                                    peerHtd.setName(Bytes.toBytes(peerTableName));
                                                    peerAdmin.createTable(peerHtd, splitKeys);
                                                }
                                            } finally {
                                                try {
                                                    peerAdmin.close();
                                                } catch (IOException e) {
                                                    LOG.warn("Fail to close the HBaseAdmin of peers", e);
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                            return null;
                        }
                    }));
                }
                try {
                    for (Future<Void> result : results) {
                        result.get();
                    }
                    // modify the znodes to the {tableName}.
                    znodes.modifyTableDesc(tableName, htd);
                    znodes.setTableState(tableName, TableState.DISABLED);
                    znodes.deleteProposedTableDesc(tableName);
                } catch (Exception e) {
                    LOG.error("Fail to add a column to the table " + tableName, e);
                    throw new IOException(e);
                }
                if (LOG.isDebugEnabled()) {
                    LOG.debug("The column of the cross site table " + tableName + " is added");
                }
                return;
            } finally {
                znodes.unlockTable(tableName);
            }
        }
        if (tries < numRetries * retryLongerMultiplier - 1) {
            try {
                // Sleep
                Thread.sleep(getPauseTime(tries));
            } catch (InterruptedException e) {
                throw new InterruptedIOException("Interrupted when waiting" + " for cross site HTable enable");
            }
        }
    }
    // All retries for acquiring locks failed! Thowing Exception
    throw new RetriesExhaustedException("Not able to acquire table lock after " + tries + " tries");
}
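The surrounding retry loop (try to take the table lock; on failure sleep and retry; convert an interrupt during the sleep into an InterruptedIOException) is also a reusable shape. A compact sketch using a plain ReentrantLock in place of the ZooKeeper-based table lock (all names are illustrative, not from the source above):

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.concurrent.locks.ReentrantLock;

public class LockWithRetries {
    private final ReentrantLock lock = new ReentrantLock();

    // Tries to take the lock up to maxTries times, sleeping between attempts.
    // An interrupt during the sleep is reported as an InterruptedIOException.
    public void runLocked(int maxTries, long pauseMillis, Runnable task) throws IOException {
        for (int tries = 0; tries < maxTries; ++tries) {
            if (lock.tryLock()) {
                try {
                    task.run();
                    return;
                } finally {
                    lock.unlock();
                }
            }
            if (tries < maxTries - 1) {
                try {
                    Thread.sleep(pauseMillis * (tries + 1)); // stand-in for getPauseTime(tries)
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    throw new InterruptedIOException("Interrupted while waiting for the lock");
                }
            }
        }
        throw new IOException("Not able to acquire lock after " + maxTries + " tries");
    }
}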
From source file: org.apache.hadoop.hbase.client.crosssite.CrossSiteHBaseAdmin.java
/**
 * Modifies the table.
 *
 * @param tableName
 * @param htd
 * @throws IOException
 * @throws KeeperException
 */
private void modifyTableInternal(final byte[] tableName, final HTableDescriptor htd)
        throws IOException, KeeperException {
    final String tableNameAsString = Bytes.toString(tableName);
    int tries = 0;
    for (; tries < numRetries * retryLongerMultiplier; ++tries) {
        if (znodes.lockTable(tableNameAsString)) {
            try {
                final HTableDescriptor oldHtd = getTableDescriptor(tableName);
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Start to modify the cross site table " + tableNameAsString);
                }
                TableState tableState = znodes.getTableState(tableNameAsString);
                if (LOG.isDebugEnabled()) {
                    LOG.debug("The state of " + tableName + " is " + tableState.toString());
                }
                if (!TableState.DISABLED.equals(tableState)) {
                    if (TableState.ENABLED.equals(tableState)) {
                        throw new TableNotDisabledException(tableNameAsString);
                    } else if (TableState.MODIFYING.equals(tableState)) {
                        if (!htd.equals(znodes.getProposedTableDesc(tableNameAsString))) {
                            throw new TableAbnormalStateException(
                                    "A previous incomplete modifyTable request with different HColumnDescriptor"
                                            + " details! Please pass same details");
                        }
                        LOG.info("Try to modify the cross site table " + tableName
                                + " in the MODIFYING state");
                    } else {
                        throw new TableAbnormalStateException(tableNameAsString + ":" + tableState.toString());
                    }
                }
                Map<String, ClusterInfo> clusterInfos = znodes.listClusterInfos();
                znodes.writeProposedTableDesc(tableNameAsString, htd);
                // update the table state to MODIFYING
                znodes.setTableState(tableNameAsString, TableState.MODIFYING);
                // access the cluster one by one
                List<Future<Void>> results = new ArrayList<Future<Void>>();
                for (final Entry<String, ClusterInfo> entry : clusterInfos.entrySet()) {
                    results.add(pool.submit(new CrossSiteCallable<Void>(conf) {
                        @Override
                        public Void call() throws Exception {
                            ClusterInfo ci = entry.getValue();
                            String clusterTableName = CrossSiteUtil.getClusterTableName(tableNameAsString,
                                    entry.getKey());
                            HTableDescriptor newHtd = new HTableDescriptor(htd);
                            newHtd.setName(Bytes.toBytes(clusterTableName));
                            HBaseAdmin admin = createHBaseAmin(configuration, ci.getAddress());
                            try {
                                admin.modifyTable(Bytes.toBytes(clusterTableName), newHtd);
                            } finally {
                                try {
                                    admin.close();
                                } catch (IOException e) {
                                    LOG.warn("Fail to close the HBaseAdmin", e);
                                }
                            }
                            if (isReplicatedTable(oldHtd)) {
                                if (ci.getPeers() != null && !ci.getPeers().isEmpty()) {
                                    HTableDescriptor peerHtd = new HTableDescriptor(htd);
                                    for (HColumnDescriptor hcd : peerHtd.getColumnFamilies()) {
                                        if (hcd.getScope() > 0) {
                                            hcd.setScope(0);
                                        } else {
                                            peerHtd.removeFamily(hcd.getName());
                                        }
                                    }
                                    for (ClusterInfo peer : ci.getPeers()) {
                                        String peerTableName = CrossSiteUtil.getPeerClusterTableName(
                                                tableNameAsString, entry.getKey(), peer.getName());
                                        if (LOG.isDebugEnabled()) {
                                            LOG.debug("Creating the table " + peerTableName + " to the peer "
                                                    + peer.getAddress());
                                        }
                                        HBaseAdmin peerAdmin = createHBaseAmin(configuration,
                                                peer.getAddress());
                                        try {
                                            peerHtd.setName(Bytes.toBytes(peerTableName));
                                            disableTable(peerAdmin, peerTableName);
                                            peerAdmin.modifyTable(Bytes.toBytes(peerTableName), peerHtd);
                                            enableTable(peerAdmin, peerTableName);
                                        } finally {
                                            try {
                                                peerAdmin.close();
                                            } catch (IOException e) {
                                                LOG.warn("Fail to close the HBaseAdmin of peers", e);
                                            }
                                        }
                                    }
                                }
                            } else if (isReplicatedTable(newHtd)) {
                                if (ci.getPeers() != null && !ci.getPeers().isEmpty()) {
                                    HTableDescriptor peerHtd = new HTableDescriptor(htd);
                                    for (HColumnDescriptor hcd : peerHtd.getColumnFamilies()) {
                                        if (hcd.getScope() > 0) {
                                            hcd.setScope(0);
                                        } else {
                                            peerHtd.removeFamily(hcd.getName());
                                        }
                                    }
                                    byte[][] splitKeys = getTableSplitsForCluster(tableNameAsString,
                                            entry.getKey());
                                    for (ClusterInfo peer : ci.getPeers()) {
                                        String peerTableName = CrossSiteUtil.getPeerClusterTableName(
                                                tableNameAsString, entry.getKey(), peer.getName());
                                        peerHtd.setName(Bytes.toBytes(peerTableName));
                                        HBaseAdmin peerAdmin = createHBaseAmin(configuration,
                                                peer.getAddress());
                                        try {
                                            if (!peerAdmin.tableExists(peerTableName)) {
                                                if (LOG.isDebugEnabled()) {
                                                    LOG.debug("Creating table " + peerTableName
                                                            + " in peer cluster " + peer
                                                            + " as the modified table " + newHtd
                                                            + " is replicatable");
                                                }
                                                peerAdmin.createTable(peerHtd, splitKeys);
                                            } else {
                                                disableTable(peerAdmin, peerTableName);
                                                peerAdmin.modifyTable(Bytes.toBytes(peerTableName), peerHtd);
                                                enableTable(peerAdmin, peerTableName);
                                            }
                                        } finally {
                                            try {
                                                peerAdmin.close();
                                            } catch (IOException e) {
                                                LOG.warn("Fail to close the HBaseAdmin of peers", e);
                                            }
                                        }
                                    }
                                }
                            }
                            return null;
                        }
                    }));
                }
                try {
                    for (Future<Void> result : results) {
                        result.get();
                    }
                    // modify the znodes to the {tableName}.
                    znodes.modifyTableDesc(tableNameAsString, htd);
                    znodes.setTableState(tableNameAsString, TableState.DISABLED);
                    znodes.deleteProposedTableDesc(tableNameAsString);
                } catch (Exception e) {
                    LOG.error("Fail to modify the table " + tableNameAsString, e);
                    throw new IOException(e);
                }
                if (LOG.isDebugEnabled()) {
                    LOG.debug("The cross site table " + tableNameAsString + " is modified");
                }
                return;
            } finally {
                znodes.unlockTable(tableNameAsString);
            }
        }
        if (tries < numRetries * retryLongerMultiplier - 1) {
            try {
                // Sleep
                Thread.sleep(getPauseTime(tries));
            } catch (InterruptedException e) {
                throw new InterruptedIOException("Interrupted when waiting" + " for cross site HTable enable");
            }
        }
    }
    // All retries for acquiring locks failed! Thowing Exception
    throw new RetriesExhaustedException("Not able to acquire table lock after " + tries + " tries");
}
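Beyond the message-only constructor shown throughout these examples, java.io.InterruptedIOException also carries a public bytesTransferred field that reports how much I/O had completed before the interruption. None of the examples above use it, but a minimal demonstration (all names here are illustrative):

import java.io.InterruptedIOException;

public class PartialTransferDemo {
    public static void main(String[] args) {
        try {
            throw makePartial(42);
        } catch (InterruptedIOException e) {
            // bytesTransferred reports how much I/O completed before the interrupt.
            System.out.println(e.getMessage() + "; transferred=" + e.bytesTransferred);
        }
    }

    static InterruptedIOException makePartial(int transferred) {
        InterruptedIOException e = new InterruptedIOException("transfer interrupted");
        e.bytesTransferred = transferred; // public field on java.io.InterruptedIOException
        return e;
    }
}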