Example usage for java.util.concurrent ExecutorCompletionService submit

Introduction

On this page you can find example usage of java.util.concurrent ExecutorCompletionService.submit, drawn from the source files listed below.

Prototype

public Future<V> submit(Callable<V> task) 

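Before the full examples, here is a minimal, self-contained sketch of the submit/take pattern they all build on. The class name and task bodies are illustrative only and do not come from any of the projects quoted below.

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class CompletionServiceDemo {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        ExecutorCompletionService<Integer> ecs = new ExecutorCompletionService<Integer>(pool);

        // submit() wraps each Callable in a Future and queues that Future
        // internally once the task completes.
        for (int i = 0; i < 4; i++) {
            final int n = i;
            ecs.submit(new Callable<Integer>() {
                @Override
                public Integer call() {
                    return n * n;
                }
            });
        }

        // take() blocks until some task has completed and returns its Future,
        // in completion order rather than submission order.
        for (int i = 0; i < 4; i++) {
            System.out.println("completed: " + ecs.take().get());
        }
        pool.shutdown();
    }
}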

Usage

From source file:com.baidu.rigel.biplatform.tesseract.isservice.search.service.impl.SearchIndexServiceImpl.java

@Override
public SearchIndexResultSet query(QueryRequest query) throws IndexAndSearchException {
    ExecutorCompletionService<SearchIndexResultSet> completionService = new ExecutorCompletionService<>(
            taskExecutor);
    LOGGER.info(String.format(LogInfoConstants.INFO_PATTERN_FUNCTION_BEGIN, "query", "[query:" + query + "]"));
    // 1. Check whether the existing indices cover this query
    // 2. Get the index meta and index shards
    // 3. Translate the request into a query usable for searching
    // 4. Dispatch the search query
    // 5. Perform the search
    // 6. Merge the results
    // 7. Return

    if (query == null || StringUtils.isEmpty(query.getCubeId())) {
        LOGGER.error(String.format(LogInfoConstants.INFO_PATTERN_FUNCTION_EXCEPTION, "query",
                "[query:" + query + "]"));
        throw new IndexAndSearchException(
                TesseractExceptionUtils.getExceptionMessage(IndexAndSearchException.QUERYEXCEPTION_MESSAGE,
                        IndexAndSearchExceptionType.ILLEGALARGUMENT_EXCEPTION),
                IndexAndSearchExceptionType.ILLEGALARGUMENT_EXCEPTION);
    }
    IndexMeta idxMeta = this.idxMetaService.getIndexMetaByCubeId(query.getCubeId(),
            query.getDataSourceInfo().getDataSourceKey());

    SearchIndexResultSet result = null;
    long current = System.currentTimeMillis();
    if (idxMeta == null || idxMeta.getIdxState().equals(IndexState.INDEX_UNAVAILABLE)
            || idxMeta.getIdxState().equals(IndexState.INDEX_UNINIT) || !query.isUseIndex()
            || (query.getFrom() != null && query.getFrom().getFrom() != null
                    && !idxMeta.getDataDescInfo().getTableNameList().contains(query.getFrom().getFrom()))
            || !indexMetaContains(idxMeta, query)) {
        LOGGER.info(String.format(LogInfoConstants.INFO_PATTERN_FUNCTION_PROCESS_NO_PARAM, "query",
                "use database"));
        // index does not exist or is unavailable; fall back to a database query
        SqlQuery sqlQuery = QueryRequestUtil.transQueryRequest2SqlQuery(query);
        SqlDataSourceWrap dataSourceWrape = null;
        try {
            dataSourceWrape = (SqlDataSourceWrap) this.dataSourcePoolService
                    .getDataSourceByKey(query.getDataSourceInfo());
        } catch (DataSourceException e) {
            LOGGER.error(String.format(LogInfoConstants.INFO_PATTERN_FUNCTION_EXCEPTION, "query",
                    "[query:" + query + "]", e));
            throw new IndexAndSearchException(
                    TesseractExceptionUtils.getExceptionMessage(IndexAndSearchException.QUERYEXCEPTION_MESSAGE,
                            IndexAndSearchExceptionType.SQL_EXCEPTION),
                    e, IndexAndSearchExceptionType.SQL_EXCEPTION);
        }
        if (dataSourceWrape == null) {
            throw new IllegalArgumentException();
        }

        long limitStart = 0;
        long limitSize = 0;
        if (query.getLimit() != null) {
            limitStart = query.getLimit().getStart();
            if (query.getLimit().getSize() > 0) {
                limitSize = query.getLimit().getSize();
            }

        }
        SearchIndexResultSet currResult = this.dataQueryService.queryForListWithSQLQueryAndGroupBy(sqlQuery,
                dataSourceWrape, limitStart, limitSize, query);
        LOGGER.info(String.format(LogInfoConstants.INFO_PATTERN_FUNCTION_PROCESS_NO_PARAM, "query",
                "db return " + currResult.size() + " records"));
        result = currResult;
    } else {
        LOGGER.info(
                String.format(LogInfoConstants.INFO_PATTERN_FUNCTION_PROCESS_NO_PARAM, "query", "use index"));

        LOGGER.info("cost :" + (System.currentTimeMillis() - current) + " before prepare get record.");
        current = System.currentTimeMillis();

        List<SearchIndexResultSet> idxShardResultSetList = new ArrayList<SearchIndexResultSet>();
        for (IndexShard idxShard : idxMeta.getIdxShardList()) {

            if (idxShard.getIdxState().equals(IndexState.INDEX_UNINIT)) {
                continue;
            }

            completionService.submit(new Callable<SearchIndexResultSet>() {

                @Override
                public SearchIndexResultSet call() throws Exception {
                    try {
                        long current = System.currentTimeMillis();
                        Node searchNode = isNodeService.getFreeSearchNodeByIndexShard(idxShard,
                                idxMeta.getClusterName());
                        searchNode.searchRequestCountAdd();
                        isNodeService.saveOrUpdateNodeInfo(searchNode);
                        LOGGER.info("begin search in shard:{}", idxShard);
                        SearchIndexResultSet result = (SearchIndexResultSet) isClient
                                .search(query, idxShard, searchNode).getMessageBody();
                        searchNode.searchrequestCountSub();
                        isNodeService.saveOrUpdateNodeInfo(searchNode);
                        LOGGER.info("compelete search in shard:{},take:{} ms", idxShard,
                                System.currentTimeMillis() - current);
                        return result;
                    } catch (Exception e) {
                        throw new IndexAndSearchException(
                                TesseractExceptionUtils.getExceptionMessage(
                                        IndexAndSearchException.QUERYEXCEPTION_MESSAGE,
                                        IndexAndSearchExceptionType.NETWORK_EXCEPTION),
                                e, IndexAndSearchExceptionType.NETWORK_EXCEPTION);
                    }

                }
            });
        }
        for (int i = 0; i < idxMeta.getIdxShardList().size(); i++) {
            try {
                idxShardResultSetList.add(completionService.take().get());
            } catch (InterruptedException | ExecutionException e) {
                throw new IndexAndSearchException(
                        TesseractExceptionUtils.getExceptionMessage(
                                IndexAndSearchException.QUERYEXCEPTION_MESSAGE,
                                IndexAndSearchExceptionType.NETWORK_EXCEPTION),
                        e, IndexAndSearchExceptionType.NETWORK_EXCEPTION);
            }
        }
        LOGGER.info(String.format(LogInfoConstants.INFO_PATTERN_FUNCTION_PROCESS_NO_PARAM, "query",
                "merging result from multiple index"));
        result = mergeResultSet(idxShardResultSetList, query);
        StringBuilder sb = new StringBuilder();
        sb.append("cost :").append(System.currentTimeMillis() - current)
                .append(" in get result record,result size:").append(result.size()).append(" shard size:")
                .append(idxShardResultSetList.size());

        LOGGER.info(sb.toString());
        current = System.currentTimeMillis();
    }

    LOGGER.info(String.format(LogInfoConstants.INFO_PATTERN_FUNCTION_PROCESS_NO_PARAM, "query",
            "merging final result"));

    LOGGER.info(String.format(LogInfoConstants.INFO_PATTERN_FUNCTION_END, "query", "[query:" + query + "]"));
    return result;
}

From source file:org.apache.flume.channel.kafka.TestKafkaChannel.java

private void putEvents(final KafkaChannel channel, final List<List<Event>> events,
        ExecutorCompletionService<Void> submitterSvc) {
    for (int i = 0; i < 5; i++) {
        final int index = i;
        submitterSvc.submit(new Callable<Void>() {
            @Override
            public Void call() {
                Transaction tx = channel.getTransaction();
                tx.begin();
                List<Event> eventsToPut = events.get(index);
                for (int j = 0; j < 10; j++) {
                    channel.put(eventsToPut.get(j));
                }
                try {
                    tx.commit();
                } finally {
                    tx.close();
                }
                return null;
            }
        });
    }
}

From source file:org.apache.flume.channel.kafka.TestKafkaChannel.java

private List<Event> pullEvents(final KafkaChannel channel, ExecutorCompletionService<Void> submitterSvc,
        final int total, final boolean testRollbacks, final boolean retryAfterRollback) {
    final List<Event> eventsPulled = Collections.synchronizedList(new ArrayList<Event>(50));
    final CyclicBarrier barrier = new CyclicBarrier(5);
    final AtomicInteger counter = new AtomicInteger(0);
    final AtomicInteger rolledBackCount = new AtomicInteger(0);
    final AtomicBoolean startedGettingEvents = new AtomicBoolean(false);
    final AtomicBoolean rolledBack = new AtomicBoolean(false);
    for (int k = 0; k < 5; k++) {
        final int index = k;
        submitterSvc.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                Transaction tx = null;
                final List<Event> eventsLocal = Lists.newLinkedList();
                int takenByThisThread = 0;
                channel.registerThread();
                Thread.sleep(1000);
                barrier.await();
                while (counter.get() < (total - rolledBackCount.get())) {
                    if (tx == null) {
                        tx = channel.getTransaction();
                        tx.begin();
                    }
                    try {
                        Event e = channel.take();
                        if (e != null) {
                            startedGettingEvents.set(true);
                            eventsLocal.add(e);
                        } else {
                            if (testRollbacks && index == 4 && (!rolledBack.get())
                                    && startedGettingEvents.get()) {
                                tx.rollback();
                                tx.close();
                                tx = null;
                                rolledBack.set(true);
                                final int eventsLocalSize = eventsLocal.size();
                                eventsLocal.clear();
                                if (!retryAfterRollback) {
                                    rolledBackCount.set(eventsLocalSize);
                                    return null;
                                }
                            } else {
                                tx.commit();
                                tx.close();
                                tx = null;
                                eventsPulled.addAll(eventsLocal);
                                counter.getAndAdd(eventsLocal.size());
                                eventsLocal.clear();
                            }
                        }
                    } catch (Exception ex) {
                        eventsLocal.clear();
                        if (tx != null) {
                            tx.rollback();
                            tx.close();
                        }
                        tx = null;
                        ex.printStackTrace();
                    }
                }
                // Close txn.
                return null;
            }
        });
    }
    return eventsPulled;
}

From source file:org.apache.hadoop.hbase.client.SpeculativeMutater.java

public Boolean mutate(final long waitToSendFailover, final long waitToSendFailoverWithException,
        final HBaseTableFunction<Void> function, final HTableInterface primaryTable,
        final Collection<HTableInterface> failoverTables, final AtomicLong lastPrimaryFail,
        final int waitTimeFromLastPrimaryFail) {
    ExecutorCompletionService<Boolean> exeS = new ExecutorCompletionService<Boolean>(exe);

    ArrayList<Callable<Boolean>> callables = new ArrayList<Callable<Boolean>>();

    final AtomicBoolean isPrimarySuccess = new AtomicBoolean(false);
    final long startTime = System.currentTimeMillis();
    final long lastPrimaryFinalFail = lastPrimaryFail.get();

    if (System.currentTimeMillis() - lastPrimaryFinalFail > 5000) {
        callables.add(new Callable<Boolean>() {
            public Boolean call() throws Exception {
                try {
                    LOG.info(" --- CallingPrimary.1:" + isPrimarySuccess.get() + ", "
                            + (System.currentTimeMillis() - startTime));
                    function.call(primaryTable);
                    LOG.info(" --- CallingPrimary.2:" + isPrimarySuccess.get() + ", "
                            + (System.currentTimeMillis() - startTime));
                    isPrimarySuccess.set(true);
                    return true;
                } catch (java.io.InterruptedIOException e) {
                    Thread.currentThread().interrupt();
                } catch (Exception e) {
                    lastPrimaryFail.set(System.currentTimeMillis());
                    Thread.currentThread().interrupt();
                }
                return null;
            }
        });
    }

    for (final HTableInterface failoverTable : failoverTables) {
        callables.add(new Callable<Boolean>() {

            public Boolean call() throws Exception {
                long waitToRequest = (System.currentTimeMillis() - lastPrimaryFinalFail > 5000)
                        ? waitToSendFailover - (System.currentTimeMillis() - startTime)
                        : waitToSendFailoverWithException - (System.currentTimeMillis() - startTime);

                LOG.info(" --- waitToRequest:" + waitToRequest + ","
                        + (System.currentTimeMillis() - lastPrimaryFinalFail) + ","
                        + (waitToSendFailover - (System.currentTimeMillis() - startTime)) + ","
                        + (waitToSendFailoverWithException - (System.currentTimeMillis() - startTime)));

                if (waitToRequest > 0) {
                    Thread.sleep(waitToRequest);
                }
                LOG.info(" --- isPrimarySuccess.get():" + isPrimarySuccess.get());
                if (isPrimarySuccess.get() == false) {
                    LOG.info(" --- CallingFailOver.1:" + isPrimarySuccess.get() + ", "
                            + (System.currentTimeMillis() - startTime));
                    function.call(failoverTable);
                    LOG.info(" --- CallingFailOver.2:" + isPrimarySuccess.get() + ", "
                            + (System.currentTimeMillis() - startTime));
                }

                return false;
            }
        });
    }
    try {

        for (Callable<Boolean> call : callables) {
            exeS.submit(call);
        }
        Boolean result = exeS.take().get();
        return result;
    } catch (InterruptedException e) {
        e.printStackTrace();
        LOG.error(e);
    } catch (ExecutionException e) {
        e.printStackTrace();
        LOG.error(e);
    }
    return null;
}

From source file:org.apache.hadoop.hbase.client.SpeculativeRequester.java

public ResultWrapper<T> request(final HBaseTableFunction<T> function, final HTableInterface primaryTable,
        final Collection<HTableInterface> failoverTables) {

    ExecutorCompletionService<ResultWrapper<T>> exeS = new ExecutorCompletionService<ResultWrapper<T>>(exe);

    final AtomicBoolean isPrimarySuccess = new AtomicBoolean(false);
    final long startTime = System.currentTimeMillis();

    ArrayList<Callable<ResultWrapper<T>>> callables = new ArrayList<Callable<ResultWrapper<T>>>();

    if (System.currentTimeMillis() - lastPrimaryFail.get() > waitTimeFromLastPrimaryFail) {
        callables.add(new Callable<ResultWrapper<T>>() {
            public ResultWrapper<T> call() throws Exception {
                try {
                    T t = function.call(primaryTable);
                    isPrimarySuccess.set(true);
                    return new ResultWrapper(true, t);
                } catch (java.io.InterruptedIOException e) {
                    Thread.currentThread().interrupt();
                } catch (Exception e) {
                    lastPrimaryFail.set(System.currentTimeMillis());
                    Thread.currentThread().interrupt();
                }
                return null;
            }
        });
    }

    for (final HTableInterface failoverTable : failoverTables) {
        callables.add(new Callable<ResultWrapper<T>>() {

            public ResultWrapper<T> call() throws Exception {

                long waitToRequest = (System.currentTimeMillis()
                        - lastPrimaryFail.get() > waitTimeFromLastPrimaryFail)
                                ? waitTimeBeforeRequestingFailover - (System.currentTimeMillis() - startTime)
                                : 0;

                if (waitToRequest > 0) {
                    Thread.sleep(waitToRequest);
                }
                if (isPrimarySuccess.get() == false) {
                    T t = function.call(failoverTable);

                    long waitToAccept = (System.currentTimeMillis()
                            - lastPrimaryFail.get() > waitTimeFromLastPrimaryFail)
                                    ? waitTimeBeforeAcceptingResults - (System.currentTimeMillis() - startTime)
                                    : 0;
                    if (isPrimarySuccess.get() == false) {
                        if (waitToAccept > 0) {
                            Thread.sleep(waitToAccept);
                        }
                    }

                    return new ResultWrapper(false, t);
                } else {
                    throw new RuntimeException("Not needed");
                }

            }
        });
    }
    try {

        //ResultWrapper<T> t = exe.invokeAny(callables);
        for (Callable<ResultWrapper<T>> call : callables) {
            exeS.submit(call);
        }

        ResultWrapper<T> result = exeS.take().get();
        //exe.shutdownNow();

        return result;
    } catch (InterruptedException e) {
        e.printStackTrace();
        LOG.error(e);
    } catch (ExecutionException e) {
        e.printStackTrace();
        LOG.error(e);
    }
    return null;

}

From source file:org.apache.hadoop.hbase.io.hfile.TestHFileBlock.java

protected void testConcurrentReadingInternals() throws IOException, InterruptedException, ExecutionException {
    for (Compression.Algorithm compressAlgo : COMPRESSION_ALGORITHMS) {
        Path path = new Path(TEST_UTIL.getDataTestDir(), "concurrent_reading");
        Random rand = defaultRandom();
        List<Long> offsets = new ArrayList<Long>();
        List<BlockType> types = new ArrayList<BlockType>();
        writeBlocks(rand, compressAlgo, path, offsets, null, types, null);
        FSDataInputStream is = fs.open(path);
        long fileSize = fs.getFileStatus(path).getLen();
        HFileContext meta = new HFileContextBuilder().withHBaseCheckSum(true)
                .withIncludesMvcc(includesMemstoreTS).withIncludesTags(includesTag)
                .withCompression(compressAlgo).build();
        HFileBlock.FSReader hbr = new HFileBlock.FSReaderV2(is, fileSize, meta);

        Executor exec = Executors.newFixedThreadPool(NUM_READER_THREADS);
        ExecutorCompletionService<Boolean> ecs = new ExecutorCompletionService<Boolean>(exec);

        for (int i = 0; i < NUM_READER_THREADS; ++i) {
            ecs.submit(new BlockReaderThread("reader_" + (char) ('A' + i), hbr, offsets, types, fileSize));
        }

        for (int i = 0; i < NUM_READER_THREADS; ++i) {
            Future<Boolean> result = ecs.take();
            assertTrue(result.get());
            if (detailedLogging) {
                LOG.info(String.valueOf(i + 1) + " reader threads finished successfully (algo=" + compressAlgo
                        + ")");
            }
        }

        is.close();
    }
}

From source file:org.apache.hadoop.hbase.regionserver.HFileReadWriteTest.java

public boolean runRandomReadWorkload() throws IOException {
    if (inputFileNames.size() != 1) {
        throw new IOException("Need exactly one input file for random reads: " + inputFileNames);
    }

    Path inputPath = new Path(inputFileNames.get(0));

    // Make sure we are using caching.
    StoreFile storeFile = openStoreFile(inputPath, true);

    StoreFile.Reader reader = storeFile.createReader();

    LOG.info("First key: " + Bytes.toStringBinary(reader.getFirstKey()));
    LOG.info("Last key: " + Bytes.toStringBinary(reader.getLastKey()));

    KeyValue firstKV = KeyValue.createKeyValueFromKey(reader.getFirstKey());
    firstRow = firstKV.getRow();

    KeyValue lastKV = KeyValue.createKeyValueFromKey(reader.getLastKey());
    lastRow = lastKV.getRow();

    byte[] family = firstKV.getFamily();
    if (!Bytes.equals(family, lastKV.getFamily())) {
        LOG.error("First and last key have different families: " + Bytes.toStringBinary(family) + " and "
                + Bytes.toStringBinary(lastKV.getFamily()));
        return false;
    }

    if (Bytes.equals(firstRow, lastRow)) {
        LOG.error("First and last row are the same, cannot run read workload: " + "firstRow="
                + Bytes.toStringBinary(firstRow) + ", " + "lastRow=" + Bytes.toStringBinary(lastRow));
        return false;
    }

    ExecutorService exec = Executors.newFixedThreadPool(numReadThreads + 1);
    int numCompleted = 0;
    int numFailed = 0;
    try {
        ExecutorCompletionService<Boolean> ecs = new ExecutorCompletionService<Boolean>(exec);
        endTime = System.currentTimeMillis() + 1000 * durationSec;
        boolean pread = true;
        for (int i = 0; i < numReadThreads; ++i)
            ecs.submit(new RandomReader(i, reader, pread));
        ecs.submit(new StatisticsPrinter());
        Future<Boolean> result;
        while (true) {
            try {
                result = ecs.poll(endTime + 1000 - System.currentTimeMillis(), TimeUnit.MILLISECONDS);
                if (result == null)
                    break;
                try {
                    if (result.get()) {
                        ++numCompleted;
                    } else {
                        ++numFailed;
                    }
                } catch (ExecutionException e) {
                    LOG.error("Worker thread failure", e.getCause());
                    ++numFailed;
                }
            } catch (InterruptedException ex) {
                LOG.error("Interrupted after " + numCompleted + " workers completed");
                Thread.currentThread().interrupt();
                continue;
            }

        }
    } finally {
        storeFile.closeReader(true);
        exec.shutdown();

        BlockCache c = cacheConf.getBlockCache();
        if (c != null) {
            c.shutdown();
        }
    }
    LOG.info("Worker threads completed: " + numCompleted);
    LOG.info("Worker threads failed: " + numFailed);
    return true;
}

From source file:org.apache.hadoop.hbase.snapshot.SnapshotManifestV1.java

static List<SnapshotRegionManifest> loadRegionManifests(final Configuration conf, final Executor executor,
        final FileSystem fs, final Path snapshotDir, final SnapshotDescription desc) throws IOException {
    FileStatus[] regions = FSUtils.listStatus(fs, snapshotDir, new FSUtils.RegionDirFilter(fs));
    if (regions == null) {
        LOG.info("No regions under directory:" + snapshotDir);
        return null;
    }

    final ExecutorCompletionService<SnapshotRegionManifest> completionService = new ExecutorCompletionService<SnapshotRegionManifest>(
            executor);
    for (final FileStatus region : regions) {
        completionService.submit(new Callable<SnapshotRegionManifest>() {
            @Override
            public SnapshotRegionManifest call() throws IOException {
                HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, region.getPath());
                return buildManifestFromDisk(conf, fs, snapshotDir, hri);
            }
        });
    }

    ArrayList<SnapshotRegionManifest> regionsManifest = new ArrayList<SnapshotRegionManifest>(regions.length);
    try {
        for (int i = 0; i < regions.length; ++i) {
            regionsManifest.add(completionService.take().get());
        }
    } catch (InterruptedException e) {
        throw new InterruptedIOException(e.getMessage());
    } catch (ExecutionException e) {
        IOException ex = new IOException();
        ex.initCause(e.getCause());
        throw ex;
    }
    return regionsManifest;
}

From source file:org.apache.hadoop.hbase.snapshot.SnapshotManifestV2.java

static List<SnapshotRegionManifest> loadRegionManifests(final Configuration conf, final Executor executor,
        final FileSystem fs, final Path snapshotDir, final SnapshotDescription desc) throws IOException {
    FileStatus[] manifestFiles = FSUtils.listStatus(fs, snapshotDir, new PathFilter() {
        @Override
        public boolean accept(Path path) {
            return path.getName().startsWith(SNAPSHOT_MANIFEST_PREFIX);
        }
    });

    if (manifestFiles == null || manifestFiles.length == 0)
        return null;

    final ExecutorCompletionService<SnapshotRegionManifest> completionService = new ExecutorCompletionService<SnapshotRegionManifest>(
            executor);
    for (final FileStatus st : manifestFiles) {
        completionService.submit(new Callable<SnapshotRegionManifest>() {
            @Override
            public SnapshotRegionManifest call() throws IOException {
                FSDataInputStream stream = fs.open(st.getPath());
                try {
                    return SnapshotRegionManifest.parseFrom(stream);
                } finally {
                    stream.close();
                }
            }
        });
    }

    ArrayList<SnapshotRegionManifest> regionsManifest = new ArrayList<SnapshotRegionManifest>(
            manifestFiles.length);
    try {
        for (int i = 0; i < manifestFiles.length; ++i) {
            regionsManifest.add(completionService.take().get());
        }
    } catch (InterruptedException e) {
        throw new InterruptedIOException(e.getMessage());
    } catch (ExecutionException e) {
        IOException ex = new IOException();
        ex.initCause(e.getCause());
        throw ex;
    }
    return regionsManifest;
}

From source file:org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil.java

public static void concurrentVisitReferencedFiles(final Configuration conf, final FileSystem fs,
        final SnapshotManifest manifest, final StoreFileVisitor visitor) throws IOException {
    final SnapshotDescription snapshotDesc = manifest.getSnapshotDescription();
    final Path snapshotDir = manifest.getSnapshotDir();

    List<SnapshotRegionManifest> regionManifests = manifest.getRegionManifests();
    if (regionManifests == null || regionManifests.size() == 0) {
        LOG.debug("No manifest files present: " + snapshotDir);
        return;
    }

    ExecutorService exec = SnapshotManifest.createExecutor(conf, "VerifySnapshot");
    final ExecutorCompletionService<Void> completionService = new ExecutorCompletionService<Void>(exec);
    try {
        for (final SnapshotRegionManifest regionManifest : regionManifests) {
            completionService.submit(new Callable<Void>() {
                @Override
                public Void call() throws IOException {
                    visitRegionStoreFiles(regionManifest, visitor);
                    return null;
                }
            });
        }
        try {
            for (int i = 0; i < regionManifests.size(); ++i) {
                completionService.take().get();
            }
        } catch (InterruptedException e) {
            throw new InterruptedIOException(e.getMessage());
        } catch (ExecutionException e) {
            if (e.getCause() instanceof CorruptedSnapshotException) {
                throw new CorruptedSnapshotException(e.getCause().getMessage(), snapshotDesc);
            } else {
                IOException ex = new IOException();
                ex.initCause(e.getCause());
                throw ex;
            }
        }
    } finally {
        exec.shutdown();
    }
}
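The examples above all share the same fan-out/fan-in idiom: submit one Callable per unit of work, then call take() (or, as in HFileReadWriteTest, poll() with a deadline) exactly once per submitted task. Below is a minimal sketch of the deadline variant; DeadlinePollDemo and drainWithDeadline are illustrative names, not part of any project quoted above.

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class DeadlinePollDemo {

    // Drain up to taskCount results, giving up once the deadline passes.
    // Returns how many tasks completed in time.
    static int drainWithDeadline(ExecutorCompletionService<Boolean> ecs, int taskCount,
            long deadlineMillis) throws InterruptedException, ExecutionException {
        int completed = 0;
        for (int i = 0; i < taskCount; i++) {
            Future<Boolean> f = ecs.poll(deadlineMillis - System.currentTimeMillis(),
                    TimeUnit.MILLISECONDS);
            if (f == null) {
                break; // deadline reached before the remaining tasks finished
            }
            f.get(); // rethrows a task failure as ExecutionException
            completed++;
        }
        return completed;
    }

    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        ExecutorCompletionService<Boolean> ecs = new ExecutorCompletionService<Boolean>(pool);
        for (int i = 0; i < 4; i++) {
            ecs.submit(new Callable<Boolean>() {
                @Override
                public Boolean call() throws Exception {
                    Thread.sleep(200);
                    return true;
                }
            });
        }
        // Two threads, four 200 ms tasks: roughly 400 ms total, well inside the deadline.
        System.out.println("completed in time: "
                + drainWithDeadline(ecs, 4, System.currentTimeMillis() + 1000));
        pool.shutdownNow();
    }
}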