Example usage for java.util.concurrent ExecutionException getCause

List of usage examples for java.util.concurrent ExecutionException getCause

Introduction

In this page you can find the example usage for java.util.concurrent ExecutionException getCause.

Prototype

public synchronized Throwable getCause() 

Source Link

Document

Returns the cause of this throwable, or null if the cause is nonexistent or unknown.

Usage

From source file: org.apache.hadoop.hbase.util.HBaseFsck.java

/**
 * Populates HbckInfo entries from the .regioninfo files found on the file system,
 * grouping every region under its table's TableInfo.
 *
 * @return map from table name to the TableInfo aggregating that table's regions
 * @throws IOException if HDFS cannot be reached while gathering table descriptors
 * @throws InterruptedException if the parallel .regioninfo reads are interrupted
 */
private SortedMap<TableName, TableInfo> loadHdfsRegionInfos() throws IOException, InterruptedException {
    tablesInfo.clear(); // regenerating the data
    // generate region split structure
    Collection<HbckInfo> infos = regionInfoMap.values();

    // Parallelized read of .regioninfo files: one work item per region.
    List<WorkItemHdfsRegionInfo> workItems = new ArrayList<WorkItemHdfsRegionInfo>(infos.size());
    for (HbckInfo info : infos) {
        workItems.add(new WorkItemHdfsRegionInfo(info, this, errors));
    }

    // Submit everything and block until all items have finished.
    // invokeAll returns futures in the same order as workItems.
    List<Future<Void>> futures = executor.invokeAll(workItems);

    // Surface (but tolerate) individual read failures; fsck keeps going.
    int idx = 0;
    for (Future<Void> future : futures) {
        WorkItemHdfsRegionInfo item = workItems.get(idx++);
        try {
            future.get();
        } catch (ExecutionException e) {
            LOG.warn("Failed to read .regioninfo file for region " + item.hbi.getRegionNameAsString(),
                    e.getCause());
        }
    }

    Path rootDir = FSUtils.getRootDir(getConf());
    FileSystem fs = rootDir.getFileSystem(getConf());
    // serialized table info gathering.
    for (HbckInfo info : infos) {

        if (info.getHdfsHRI() == null) {
            // was an orphan
            continue;
        }

        // get table name from hdfs, populate various HBaseFsck tables.
        TableName tableName = info.getTableName();
        if (tableName == null) {
            // There was an entry in hbase:meta not in the HDFS?
            LOG.warn("tableName was null for: " + info);
            continue;
        }

        TableInfo tableInfo = tablesInfo.get(tableName);
        if (tableInfo == null) {
            // First region seen for this table; runs only once per table.
            tableInfo = new TableInfo(tableName);
            tablesInfo.put(tableName, tableInfo);
            try {
                HTableDescriptor descriptor =
                        FSTableDescriptors.getTableDescriptorFromFs(fs, rootDir, tableName);
                tableInfo.htds.add(descriptor);
            } catch (IOException ioe) {
                if (!orphanTableDirs.containsKey(tableName)) {
                    LOG.warn("Unable to read .tableinfo from " + rootDir, ioe);
                    //should only report once for each table
                    errors.reportError(ERROR_CODE.NO_TABLEINFO_FILE,
                            "Unable to read .tableinfo from " + rootDir + "/" + tableName);
                    Set<String> families = new HashSet<String>();
                    orphanTableDirs.put(tableName, getColumnFamilyList(families, info));
                }
            }
        }
        if (!info.isSkipChecks()) {
            tableInfo.addRegionInfo(info);
        }
    }

    loadTableInfosForTablesWithNoRegion();

    return tablesInfo;
}