Example usage for java.io InterruptedIOException InterruptedIOException

Introduction

This page shows example usages of the java.io InterruptedIOException(String) constructor.

Prototype

public InterruptedIOException(String s) 

Document

Constructs an InterruptedIOException with the specified detail message.
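
The constructor takes only a detail message, so code that needs to preserve the original InterruptedException typically attaches it afterwards via initCause and restores the thread's interrupt status. Below is a minimal, self-contained sketch of that pattern; the class and method names are illustrative only and are not taken from the projects quoted in the Usage section.

import java.io.IOException;
import java.io.InterruptedIOException;

public class InterruptedIOExceptionDemo {

    // Hypothetical helper: waits on a monitor and converts an
    // InterruptedException into an InterruptedIOException with a
    // descriptive detail message.
    static void awaitOrAbort(Object lock, long millis) throws IOException {
        synchronized (lock) {
            try {
                lock.wait(millis);
            } catch (InterruptedException e) {
                // Restore the interrupt flag so callers can still observe it.
                Thread.currentThread().interrupt();
                InterruptedIOException iioe =
                        new InterruptedIOException("Interrupted while waiting for I/O to complete");
                iioe.initCause(e); // the (String) constructor cannot carry a cause itself
                throw iioe;
            }
        }
    }

    public static void main(String[] args) throws Exception {
        Object lock = new Object();
        Thread worker = new Thread(() -> {
            try {
                awaitOrAbort(lock, 10_000);
            } catch (IOException ex) {
                System.out.println("Caught: " + ex);
            }
        });
        worker.start();
        worker.interrupt(); // triggers the InterruptedIOException path
        worker.join();
    }
}

Running the sketch prints the wrapped exception; the cause set via initCause remains available through getCause().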

Usage

From source file:org.apache.hadoop.hbase.client.HBaseAdmin.java

/**
 * Creates a new table with an initial set of empty regions defined by the
 * specified split keys.  The total number of regions created will be the
 * number of split keys plus one. Synchronous operation.
 * Note: Avoid passing an empty split key.
 *
 * @param desc table descriptor for table
 * @param splitKeys array of split keys for the initial regions of the table
 *
 * @throws IllegalArgumentException if the table name is reserved, if the split keys
 * are repeated, or if a split key is an empty byte array.
 * @throws MasterNotRunningException if master is not running
 * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent
 * threads, the table may have been created between test-for-existence
 * and attempt-at-creation).
 * @throws IOException
 */
public void createTable(final HTableDescriptor desc, byte[][] splitKeys) throws IOException {
    try {
        createTableAsync(desc, splitKeys);
    } catch (SocketTimeoutException ste) {
        LOG.warn("Creating " + desc.getTableName() + " took too long", ste);
    }
    int numRegs = splitKeys == null ? 1 : splitKeys.length + 1;
    int prevRegCount = 0;
    boolean doneWithMetaScan = false;
    for (int tries = 0; tries < this.numRetries * this.retryLongerMultiplier; ++tries) {
        if (!doneWithMetaScan) {
            // Wait for new table to come on-line
            final AtomicInteger actualRegCount = new AtomicInteger(0);
            MetaScannerVisitor visitor = new MetaScannerVisitorBase() {
                @Override
                public boolean processRow(Result rowResult) throws IOException {
                    HRegionInfo info = HRegionInfo.getHRegionInfo(rowResult);
                    if (info == null) {
                        LOG.warn("No serialized HRegionInfo in " + rowResult);
                        return true;
                    }
                    if (!info.getTable().equals(desc.getTableName())) {
                        return false;
                    }
                    ServerName serverName = HRegionInfo.getServerName(rowResult);
                    // Make sure that regions are assigned to server
                    if (!(info.isOffline() || info.isSplit()) && serverName != null
                            && serverName.getHostAndPort() != null) {
                        actualRegCount.incrementAndGet();
                    }
                    return true;
                }
            };
            MetaScanner.metaScan(conf, connection, visitor, desc.getTableName());
            if (actualRegCount.get() < numRegs) {
                if (tries == this.numRetries * this.retryLongerMultiplier - 1) {
                    throw new RegionOfflineException("Only " + actualRegCount.get() + " of " + numRegs
                            + " regions are online; retries exhausted.");
                }
                try { // Sleep
                    Thread.sleep(getPauseTime(tries));
                } catch (InterruptedException e) {
                    throw new InterruptedIOException("Interrupted when opening" + " regions; "
                            + actualRegCount.get() + " of " + numRegs + " regions processed so far");
                }
                if (actualRegCount.get() > prevRegCount) { // Making progress
                    prevRegCount = actualRegCount.get();
                    tries = -1;
                }
            } else {
                doneWithMetaScan = true;
                tries = -1;
            }
        } else if (isTableEnabled(desc.getTableName())) {
            return;
        } else {
            try { // Sleep
                Thread.sleep(getPauseTime(tries));
            } catch (InterruptedException e) {
                throw new InterruptedIOException(
                        "Interrupted when waiting" + " for table to be enabled; meta scan was done");
            }
        }
    }
    throw new TableNotEnabledException(
            "Retries exhausted while still waiting for table: " + desc.getTableName() + " to be enabled");
}

From source file:org.apache.hadoop.hdfs.DataStreamer.java

/**
 * wait for the ack of seqno
 *
 * @param seqno the sequence number to be acked
 * @throws IOException
 */
void waitForAckedSeqno(long seqno) throws IOException {
    TraceScope scope = dfsClient.getTracer().newScope("waitForAckedSeqno");
    try {
        if (canStoreFileInDB()) {
            LOG.debug(
                    "Stuffed Inode:  Closing File. Datanode ack skipped. All the data will be stored in the database");
        } else {
            if (DFSClient.LOG.isDebugEnabled()) {
                DFSClient.LOG.debug("Waiting for ack for: " + seqno);
            }
            long begin = Time.monotonicNow();
            try {
                synchronized (dataQueue) {
                    while (!streamerClosed) {
                        checkClosed();
                        if (lastAckedSeqno >= seqno) {
                            break;
                        }
                        try {
                            dataQueue.wait(1000); // when we receive an ack, we notify on
                            // dataQueue
                        } catch (InterruptedException ie) {
                            throw new InterruptedIOException(
                                    "Interrupted while waiting for data to be acknowledged by pipeline");
                        }
                    }
                }
                checkClosed();
            } catch (ClosedChannelException e) {
            }
            long duration = Time.monotonicNow() - begin;
            if (duration > dfsclientSlowLogThresholdMs) {
                DFSClient.LOG.warn("Slow waitForAckedSeqno took " + duration + "ms (threshold="
                        + dfsclientSlowLogThresholdMs + "ms)");
            }
        }
    } finally {
        scope.close();
    }
}

From source file:org.apache.hadoop.http.HttpServer3.java

/**
 * Start the server. Does not wait for the server to start.
 */
public void start() throws IOException {
    try {
        try {
            openListeners();
            webServer.start();
        } catch (IOException ex) {
            LOG.info("HttpServer3.start() threw a non Bind IOException", ex);
            throw ex;
        } catch (MultiException ex) {
            LOG.info("HttpServer3.start() threw a MultiException", ex);
            throw ex;
        }
        // Make sure there are no handler failures.
        Handler[] handlers = webServer.getHandlers();
        for (Handler handler : handlers) {
            if (handler.isFailed()) {
                throw new IOException("Problem in starting http server. Server handlers failed");
            }
        }
        // Make sure there are no errors initializing the context.
        Throwable unavailableException = webAppContext.getUnavailableException();
        if (unavailableException != null) {
            // Have to stop the webserver, or else its non-daemon threads
            // will hang forever.
            webServer.stop();
            throw new IOException("Unable to initialize WebAppContext", unavailableException);
        }
    } catch (IOException e) {
        throw e;
    } catch (InterruptedException e) {
        throw (IOException) new InterruptedIOException("Interrupted while starting HTTP server").initCause(e);
    } catch (Exception e) {
        throw new IOException("Problem starting http server", e);
    }
}
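
Note the InterruptedException branch above: the (String) constructor shown in the prototype cannot carry a cause, so the code attaches the original exception with initCause(e) and, because Throwable.initCause returns a Throwable, casts the result back to IOException before throwing it. The same idiom appears in the HttpServer2 and HBase HttpServer examples below.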

From source file:org.apache.hadoop.hbase.client.HBaseAdmin.java

/**
 * Deletes a table.
 * Synchronous operation.
 *
 * @param tableName name of table to delete
 * @throws IOException if a remote or network exception occurs
 */
public void deleteTable(final TableName tableName) throws IOException {
    boolean tableExists = true;

    executeCallable(new MasterCallable<Void>(getConnection()) {
        @Override
        public Void call(int callTimeout) throws ServiceException {
            DeleteTableRequest req = RequestConverter.buildDeleteTableRequest(tableName);
            master.deleteTable(null, req);
            return null;
        }
    });

    int failures = 0;
    // Wait until all regions deleted
    for (int tries = 0; tries < (this.numRetries * this.retryLongerMultiplier); tries++) {
        try {
            HRegionLocation firstMetaServer = getFirstMetaServerForTable(tableName);
            Scan scan = MetaReader.getScanForTableName(tableName);
            scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
            ScanRequest request = RequestConverter
                    .buildScanRequest(firstMetaServer.getRegionInfo().getRegionName(), scan, 1, true);
            Result[] values = null;
            // Get a batch at a time.
            ClientService.BlockingInterface server = connection.getClient(firstMetaServer.getServerName());
            PayloadCarryingRpcController controller = new PayloadCarryingRpcController();
            try {
                controller.setPriority(tableName);
                ScanResponse response = server.scan(controller, request);
                values = ResponseConverter.getResults(controller.cellScanner(), response);
            } catch (ServiceException se) {
                throw ProtobufUtil.getRemoteException(se);
            }

            // let us wait until hbase:meta table is updated and
            // HMaster removes the table from its HTableDescriptors
            if (values == null || values.length == 0) {
                tableExists = false;
                GetTableDescriptorsResponse htds;
                MasterKeepAliveConnection master = connection.getKeepAliveMasterService();
                try {
                    GetTableDescriptorsRequest req = RequestConverter
                            .buildGetTableDescriptorsRequest(tableName);
                    htds = master.getTableDescriptors(null, req);
                } catch (ServiceException se) {
                    throw ProtobufUtil.getRemoteException(se);
                } finally {
                    master.close();
                }
                tableExists = !htds.getTableSchemaList().isEmpty();
                if (!tableExists) {
                    break;
                }
            }
        } catch (IOException ex) {
            failures++;
            if (failures == numRetries - 1) { // no more tries left
                if (ex instanceof RemoteException) {
                    throw ((RemoteException) ex).unwrapRemoteException();
                } else {
                    throw ex;
                }
            }
        }
        try {
            Thread.sleep(getPauseTime(tries));
        } catch (InterruptedException e) {
            throw new InterruptedIOException("Interrupted when waiting" + " for table to be deleted");
        }
    }

    if (tableExists) {
        throw new IOException("Retries exhausted, it took too long to wait" + " for the table " + tableName
                + " to be deleted.");
    }
    // Delete cached information to prevent clients from using old locations
    this.connection.clearRegionCache(tableName);
    LOG.info("Deleted " + tableName);
}

From source file:org.apache.hadoop.http.HttpServer2.java

/**
 * Start the server. Does not wait for the server to start.
 */
public void start() throws IOException {
    try {
        try {
            openListeners();
            webServer.start();
        } catch (IOException ex) {
            LOG.info("HttpServer.start() threw a non Bind IOException", ex);
            throw ex;
        } catch (MultiException ex) {
            LOG.info("HttpServer.start() threw a MultiException", ex);
            throw ex;
        }
        // Make sure there are no handler failures.
        Handler[] handlers = webServer.getHandlers();
        for (Handler handler : handlers) {
            if (handler.isFailed()) {
                throw new IOException("Problem in starting http server. Server handlers failed");
            }
        }
        // Make sure there are no errors initializing the context.
        Throwable unavailableException = webAppContext.getUnavailableException();
        if (unavailableException != null) {
            // Have to stop the webserver, or else its non-daemon threads
            // will hang forever.
            webServer.stop();
            throw new IOException("Unable to initialize WebAppContext", unavailableException);
        }
    } catch (IOException e) {
        throw e;
    } catch (InterruptedException e) {
        throw (IOException) new InterruptedIOException("Interrupted while starting HTTP server").initCause(e);
    } catch (Exception e) {
        throw new IOException("Problem starting http server", e);
    }
}

From source file:org.apache.hadoop.hbase.http.HttpServer.java

/**
 * Start the server. Does not wait for the server to start.
 */
public void start() throws IOException {
    try {
        try {
            openListeners();
            webServer.start();
        } catch (IOException ex) {
            LOG.info("HttpServer.start() threw a non Bind IOException", ex);
            throw ex;
        } catch (MultiException ex) {
            LOG.info("HttpServer.start() threw a MultiException", ex);
            throw ex;
        }
        // Make sure there are no handler failures.
        Handler[] handlers = webServer.getHandlers();
        for (int i = 0; i < handlers.length; i++) {
            if (handlers[i].isFailed()) {
                throw new IOException("Problem in starting http server. Server handlers failed");
            }
        }
        // Make sure there are no errors initializing the context.
        Throwable unavailableException = webAppContext.getUnavailableException();
        if (unavailableException != null) {
            // Have to stop the webserver, or else its non-daemon threads
            // will hang forever.
            webServer.stop();
            throw new IOException("Unable to initialize WebAppContext", unavailableException);
        }
    } catch (IOException e) {
        throw e;
    } catch (InterruptedException e) {
        throw (IOException) new InterruptedIOException("Interrupted while starting HTTP server").initCause(e);
    } catch (Exception e) {
        throw new IOException("Problem starting http server", e);
    }
}

From source file:org.apache.hadoop.hbase.client.crosssite.CrossSiteHBaseAdmin.java

/**
 * Adds the cluster into ZooKeeper. Meanwhile, checks whether there are cross-site tables and,
 * if so, creates those tables in the current cluster.
 *
 * @param name
 * @param address
 * @param createTableAgainIfAlreadyExists
 *          if true, delete the table and recreate it. If false, enable the table if it already
 *          exists, or create it if it does not.
 * @throws IOException
 */
public void addCluster(final String name, final String address, final boolean createTableAgainIfAlreadyExists)
        throws IOException {
    CrossSiteUtil.validateClusterName(name);
    ZKUtil.transformClusterKey(address); // This is done to validate the address.
    Map<String, ClusterInfo> clusters = null;
    try {
        clusters = znodes.listClusterInfos();
    } catch (KeeperException e) {
        throw new IOException(e);
    }
    if (clusters.get(name) != null) {
        throw new IOException("A Cluster with same name '" + name + "' already available");
    }
    for (ClusterInfo cluster : clusters.values()) {
        if (cluster.getAddress().equals(address)) {
            throw new IOException("A cluster " + cluster + " with same address already available");
        }
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("Adding cluster " + name + ":" + address);
    }
    HBaseAdmin admin = null;
    try {
        // Add all the existing tables to this cluster
        String[] tableNames = znodes.getTableNames();
        if (tableNames != null && tableNames.length > 0) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Creating tables " + toString(tableNames) + " in the cluster " + name);
            }
            admin = createHBaseAmin(conf, address);
            List<Future<Void>> results = new ArrayList<Future<Void>>();
            for (final String tableName : tableNames) {
                final String clusterTableName = CrossSiteUtil.getClusterTableName(tableName, name);
                results.add(pool.submit(new CrossSiteCallable<Void>(admin) {
                    @Override
                    public Void call() throws Exception {
                        int tries = 0;
                        for (; tries < numRetries * retryLongerMultiplier; ++tries) {
                            if (znodes.lockTable(tableName)) {
                                try {
                                    HTableDescriptor htd = znodes.getTableDescAllowNull(tableName);
                                    if (htd == null) {
                                        return null;
                                    }
                                    htd.setName(Bytes.toBytes(clusterTableName));
                                    byte[][] splitKeys = getTableSplitsForCluster(tableName, name);
                                    boolean needCreate = true;
                                    if (hbaseAdmin.tableExists(clusterTableName)) {
                                        if (createTableAgainIfAlreadyExists) {
                                            if (LOG.isDebugEnabled()) {
                                                LOG.debug("Table " + clusterTableName
                                                        + " already present in the cluster " + name
                                                        + ". Going to drop it and create again");
                                            }
                                            disableTable(hbaseAdmin, clusterTableName);
                                            hbaseAdmin.deleteTable(clusterTableName);
                                        } else {
                                            if (LOG.isDebugEnabled()) {
                                                LOG.debug("Table " + clusterTableName
                                                        + " already present in the cluster " + name
                                                        + ". Going to enable it");
                                            }
                                            enableTable(hbaseAdmin, clusterTableName);
                                            needCreate = false;
                                        }
                                    }
                                    if (needCreate) {
                                        if (LOG.isDebugEnabled()) {
                                            LOG.debug("Creating table " + clusterTableName + " in the cluster "
                                                    + name);
                                        }
                                        hbaseAdmin.createTable(htd, splitKeys);
                                    }
                                } finally {
                                    znodes.unlockTable(tableName);
                                }
                                if (LOG.isDebugEnabled()) {
                                    LOG.debug("Created table " + clusterTableName + " in the cluster " + name);
                                }
                                return null;
                            }
                            if (tries < numRetries * retryLongerMultiplier - 1) {
                                try { // Sleep
                                    Thread.sleep(getPauseTime(tries));
                                } catch (InterruptedException e) {
                                    throw new InterruptedIOException(
                                            "Interrupted when waiting" + " for cross site HTable enable");
                                }
                            }
                        }
                        // All retries for acquiring locks failed! Throwing Exception
                        throw new RetriesExhaustedException(
                                "Not able to acquire table lock after " + tries + " tries");
                    }
                }));
            }
            for (Future<Void> result : results) {
                result.get();
            }

            LOG.info("The tables in the cluster " + name + " are created");
        }
        if (LOG.isDebugEnabled()) {
            LOG.debug("Create the znode of the cluster " + name);
        }
        znodes.createClusterZNode(name, address);
    } catch (KeeperException e) {
        throw new IOException(e);
    } catch (InterruptedException e) {
        throw new IOException(e);
    } catch (ExecutionException e) {
        throw new IOException(e);
    } finally {
        if (admin != null) {
            try {
                admin.close();
            } catch (IOException e) {
                LOG.warn("Fail to close the HBaseAdmin", e);
            }
        }
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("The cluster " + name + ":" + address + " is created");
    }
}

From source file:org.apache.hadoop.hbase.client.ConnectionImplementation.java

private RegionLocations locateRegionInMeta(TableName tableName, byte[] row, boolean useCache, boolean retry,
        int replicaId) throws IOException {

    // If we are supposed to be using the cache, look in the cache to see if
    // we already have the region.
    if (useCache) {
        RegionLocations locations = getCachedLocation(tableName, row);
        if (locations != null && locations.getRegionLocation(replicaId) != null) {
            return locations;
        }
    }

    // build the key of the meta region we should be looking for.
    // the extra 9's on the end are necessary to allow "exact" matches
    // without knowing the precise region names.
    byte[] metaKey = HRegionInfo.createRegionName(tableName, row, HConstants.NINES, false);

    Scan s = new Scan();
    s.setReversed(true);
    s.setStartRow(metaKey);
    s.setSmall(true);
    s.setCaching(1);
    if (this.useMetaReplicas) {
        s.setConsistency(Consistency.TIMELINE);
    }

    int localNumRetries = (retry ? numTries : 1);

    for (int tries = 0; true; tries++) {
        if (tries >= localNumRetries) {
            throw new NoServerForRegionException("Unable to find region for " + Bytes.toStringBinary(row)
                    + " in " + tableName + " after " + localNumRetries + " tries.");
        }
        if (useCache) {
            RegionLocations locations = getCachedLocation(tableName, row);
            if (locations != null && locations.getRegionLocation(replicaId) != null) {
                return locations;
            }
        } else {
            // If we are not supposed to be using the cache, delete any existing cached location
            // so it won't interfere.
            metaCache.clearCache(tableName, row);
        }

        // Query the meta region
        try {
            Result regionInfoRow = null;
            ReversedClientScanner rcs = null;
            try {
                rcs = new ClientSmallReversedScanner(conf, s, TableName.META_TABLE_NAME, this, rpcCallerFactory,
                        rpcControllerFactory, getMetaLookupPool(), 0);
                regionInfoRow = rcs.next();
            } finally {
                if (rcs != null) {
                    rcs.close();
                }
            }

            if (regionInfoRow == null) {
                throw new TableNotFoundException(tableName);
            }

            // convert the row result into the HRegionLocation we need!
            RegionLocations locations = MetaTableAccessor.getRegionLocations(regionInfoRow);
            if (locations == null || locations.getRegionLocation(replicaId) == null) {
                throw new IOException("HRegionInfo was null in " + tableName + ", row=" + regionInfoRow);
            }
            HRegionInfo regionInfo = locations.getRegionLocation(replicaId).getRegionInfo();
            if (regionInfo == null) {
                throw new IOException("HRegionInfo was null or empty in " + TableName.META_TABLE_NAME + ", row="
                        + regionInfoRow);
            }

            // possible we got a region of a different table...
            if (!regionInfo.getTable().equals(tableName)) {
                throw new TableNotFoundException(
                        "Table '" + tableName + "' was not found, got: " + regionInfo.getTable() + ".");
            }
            if (regionInfo.isSplit()) {
                throw new RegionOfflineException("the only available region for"
                        + " the required row is a split parent," + " the daughters should be online soon: "
                        + regionInfo.getRegionNameAsString());
            }
            if (regionInfo.isOffline()) {
                throw new RegionOfflineException("the region is offline, could"
                        + " be caused by a disable table call: " + regionInfo.getRegionNameAsString());
            }

            ServerName serverName = locations.getRegionLocation(replicaId).getServerName();
            if (serverName == null) {
                throw new NoServerForRegionException("No server address listed " + "in "
                        + TableName.META_TABLE_NAME + " for region " + regionInfo.getRegionNameAsString()
                        + " containing row " + Bytes.toStringBinary(row));
            }

            if (isDeadServer(serverName)) {
                throw new RegionServerStoppedException(
                        "hbase:meta says the region " + regionInfo.getRegionNameAsString()
                                + " is managed by the server " + serverName + ", but it is dead.");
            }
            // Instantiate the location
            cacheLocation(tableName, locations);
            return locations;
        } catch (TableNotFoundException e) {
            // if we got this error, probably means the table just plain doesn't
            // exist. rethrow the error immediately. this should always be coming
            // from the HTable constructor.
            throw e;
        } catch (IOException e) {
            ExceptionUtil.rethrowIfInterrupt(e);

            if (e instanceof RemoteException) {
                e = ((RemoteException) e).unwrapRemoteException();
            }
            if (tries < localNumRetries - 1) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("locateRegionInMeta parentTable=" + TableName.META_TABLE_NAME + ", metaLocation="
                            + ", attempt=" + tries + " of " + localNumRetries
                            + " failed; retrying after sleep of "
                            + ConnectionUtils.getPauseTime(this.pause, tries) + " because: " + e.getMessage());
                }
            } else {
                throw e;
            }
            // Only relocate the parent region if necessary
            if (!(e instanceof RegionOfflineException || e instanceof NoServerForRegionException)) {
                relocateRegion(TableName.META_TABLE_NAME, metaKey, replicaId);
            }
        }
        try {
            Thread.sleep(ConnectionUtils.getPauseTime(this.pause, tries));
        } catch (InterruptedException e) {
            throw new InterruptedIOException(
                    "Giving up trying to locate region in " + "meta: thread is interrupted.");
        }
    }
}

From source file:org.apache.hadoop.hbase.client.HBaseAdmin.java

/**
 * Wait for the table to be enabled and available
 * If enabling the table exceeds the retry period, an exception is thrown.
 * @param tableName name of the table
 * @throws IOException if a remote or network exception occurs or
 *    table is not enabled after the retries period.
 */
private void waitUntilTableIsEnabled(final TableName tableName) throws IOException {
    boolean enabled = false;
    long start = EnvironmentEdgeManager.currentTimeMillis();
    for (int tries = 0; tries < (this.numRetries * this.retryLongerMultiplier); tries++) {
        try {
            enabled = isTableEnabled(tableName);
        } catch (TableNotFoundException tnfe) {
            // wait for table to be created
            enabled = false;
        }
        enabled = enabled && isTableAvailable(tableName);
        if (enabled) {
            break;
        }
        long sleep = getPauseTime(tries);
        if (LOG.isDebugEnabled()) {
            LOG.debug("Sleeping= " + sleep + "ms, waiting for all regions to be " + "enabled in " + tableName);
        }
        try {
            Thread.sleep(sleep);
        } catch (InterruptedException e) {
            // Do this conversion rather than let it out because do not want to
            // change the method signature.
            throw (InterruptedIOException) new InterruptedIOException("Interrupted").initCause(e);
        }
    }
    if (!enabled) {
        long msec = EnvironmentEdgeManager.currentTimeMillis() - start;
        throw new IOException("Table '" + tableName + "' not yet enabled, after " + msec + "ms.");
    }
}

From source file:org.apache.hadoop.raid.RaidShell.java

/**
 * Submit a map/reduce job to raid the input paths
 * @param args all input parameters
 * @param startIndex starting index of arguments: policy_name path1, ..., pathn
 * @return 0 if successful
 * @throws java.io.IOException if any error occurs
 * @throws javax.xml.parsers.ParserConfigurationException
 * @throws ClassNotFoundException 
 * @throws org.apache.hadoop.raid.RaidConfigurationException
 * @throws org.xml.sax.SAXException
 */
private int distRaid(String[] args, int startIndex) throws IOException, SAXException,
        RaidConfigurationException, ClassNotFoundException, ParserConfigurationException, JSONException {
    // find the matched raid policy
    String policyName = args[startIndex++];
    ConfigManager configManager = new ConfigManager(conf);
    PolicyInfo policy = configManager.getPolicy(policyName);
    if (policy == null) {
        System.err.println("Invalid policy: " + policyName);
        return -1;
    }
    Codec codec = Codec.getCodec(policy.getCodecId());
    if (codec == null) {
        System.err.println("Policy " + policyName + " with invalid codec " + policy.getCodecId());
    }

    // find the matched paths to raid
    FileSystem fs = FileSystem.get(conf);
    List<FileStatus> pathsToRaid = new ArrayList<FileStatus>();
    List<Path> policySrcPaths = policy.getSrcPathExpanded();
    for (int i = startIndex; i < args.length; i++) {
        boolean invalidPathToRaid = true;
        Path pathToRaid = new Path(args[i]).makeQualified(fs);
        String pathToRaidStr = pathToRaid.toString();
        if (!pathToRaidStr.endsWith(Path.SEPARATOR)) {
            pathToRaidStr = pathToRaidStr.concat(Path.SEPARATOR);
        }
        for (Path srcPath : policySrcPaths) {
            String srcStr = srcPath.toString();
            if (!srcStr.endsWith(Path.SEPARATOR)) {
                srcStr = srcStr.concat(Path.SEPARATOR);
            }
            if (pathToRaidStr.startsWith(srcStr)) {
                if (codec.isDirRaid) {
                    FileUtil.listStatusForLeafDir(fs, fs.getFileStatus(pathToRaid), pathsToRaid);
                } else {
                    FileUtil.listStatusHelper(fs, pathToRaid, Integer.MAX_VALUE, pathsToRaid);
                }
                invalidPathToRaid = false;
                break;
            }
        }
        if (invalidPathToRaid) {
            System.err.println("Path " + pathToRaidStr + " does not support by the given policy " + policyName);
        }
    }

    // Check if files are valid
    List<FileStatus> validPaths = new ArrayList<FileStatus>();
    List<PolicyInfo> policyInfos = new ArrayList<PolicyInfo>(1);
    policyInfos.add(policy);
    RaidState.Checker checker = new RaidState.Checker(policyInfos, conf);
    long now = System.currentTimeMillis();
    for (FileStatus fileStatus : pathsToRaid) {
        FileStatus[] dirStats = null;
        if (codec.isDirRaid) {
            dirStats = fs.listStatus(fileStatus.getPath());
        }
        RaidState stat = checker.check(policy, fileStatus, now, false,
                dirStats == null ? null : Arrays.asList(dirStats));
        if (stat == RaidState.NOT_RAIDED_BUT_SHOULD) {
            validPaths.add(fileStatus);
        } else {
            System.err.println("Path " + fileStatus.getPath() + " is not qualified for raiding: " + stat);
        }
    }
    if (validPaths.isEmpty()) {
        System.err.println("No file can be raided");
        return 0;
    }
    DistRaid dr = new DistRaid(conf);
    //add paths for distributed raiding
    List<EncodingCandidate> validEC = RaidNode.splitPaths(conf, Codec.getCodec(policy.getCodecId()),
            validPaths);
    dr.addRaidPaths(policy, validEC);

    if (dr.startDistRaid()) {
        System.out.println("Job started: " + dr.getJobTrackingURL());
        System.out.print("Job in progress ");
        while (!dr.checkComplete()) {
            try {
                System.out.print(".");
                Thread.sleep(1000);
            } catch (InterruptedException e) {
                throw new InterruptedIOException("Got interrupted.");
            }
        }
        if (dr.successful()) {
            System.out.println("/nFiles are successfully raided.");
            return 0;
        } else {
            System.err.println("/nRaid job failed.");
            return -1;
        }
    }
    return -1;
}