Example usage for java.util.concurrent ExecutionException getCause

List of usage examples for java.util.concurrent ExecutionException getCause

Introduction

On this page you can find usage examples for java.util.concurrent ExecutionException getCause.

Prototype

public synchronized Throwable getCause() 

Document

Returns the cause of this throwable or null if the cause is nonexistent or unknown.
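
As the examples below show, the typical pattern is to call Future.get(), catch the ExecutionException, and use getCause() to recover, rethrow, or translate the exception thrown inside the task. The following is a minimal, self-contained sketch of that pattern; the task and exception types are illustrative only.

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class GetCauseExample {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        // A task that fails: the exception it throws becomes the ExecutionException's cause.
        Callable<String> task = () -> {
            throw new IllegalStateException("failure inside the task");
        };
        Future<String> future = pool.submit(task);
        try {
            future.get();
        } catch (ExecutionException e) {
            // getCause() returns the exception thrown inside the task (or null if unknown).
            Throwable cause = e.getCause();
            System.out.println("task failed with: " + cause);
        } finally {
            pool.shutdown();
        }
    }
}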

Usage

From source file:com.amazon.janusgraph.diskstorage.dynamodb.DynamoDBDelegate.java
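
JanusGraph's DynamoDB delegate unwraps the cause of an ExecutionException: a BackendException is rethrown as-is, while any other cause is translated into a DynamoDB-specific exception.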

public BackendException unwrapExecutionException(ExecutionException e, String apiName) {
    final Throwable cause = e.getCause();
    if (cause instanceof BackendException) {
        return (BackendException) cause; //already translated
    } else {
        // OK not to drill down to a specific exception type: a PermanentBackendException
        // would already have been thrown for the other cases
        return processDynamoDBAPIException(cause, apiName, null /*tableName*/);
    }
}

From source file:org.apache.tez.runtime.LogicalIOProcessorRuntimeTask.java
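
Tez initializes inputs and outputs in parallel; when an ExecutionException surfaces from a Future, it rethrows the cause directly if it is an Exception, otherwise it wraps the ExecutionException itself.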

/**
 * @throws Exception
 */
public void initialize() throws Exception {
    Preconditions.checkState(this.state.get() == State.NEW, "Already initialized");
    this.state.set(State.INITED);
    if (this.tezCounters != null) {
        this.initStartTimeNs = System.nanoTime();
    }

    this.processorContext = createProcessorContext();
    this.processor = createProcessor(processorDescriptor.getClassName(), processorContext);

    if (initializeProcessorFirst || initializeProcessorIOSerially) {
        // Initialize processor in the current thread.
        initializeLogicalIOProcessor();
    }
    int numTasks = 0;

    int inputIndex = 0;
    for (InputSpec inputSpec : taskSpec.getInputs()) {
        this.initializerCompletionService.submit(new InitializeInputCallable(inputSpec, inputIndex++));
        numTasks++;
    }

    int outputIndex = 0;
    for (OutputSpec outputSpec : taskSpec.getOutputs()) {
        this.initializerCompletionService.submit(new InitializeOutputCallable(outputSpec, outputIndex++));
        numTasks++;
    }

    if (!(initializeProcessorFirst || initializeProcessorIOSerially)) {
        // Initialize processor in the current thread.
        initializeLogicalIOProcessor();
    }
    int completedTasks = 0;
    while (completedTasks < numTasks) {
        LOG.info("Waiting for " + (numTasks - completedTasks) + " initializers to finish");
        Future<Void> future = initializerCompletionService.take();
        try {
            future.get();
            completedTasks++;
        } catch (ExecutionException e) {
            if (e.getCause() instanceof Exception) {
                throw (Exception) e.getCause();
            } else {
                throw new Exception(e);
            }
        }
    }
    LOG.info("All initializers finished");
    // Group inputs depend on the inputs being initialized, so this must be done after them.
    initializeGroupInputs();
    // Register the groups so that appropriate calls can be made.
    this.inputReadyTracker.setGroupedInputs(groupInputsMap == null ? null : groupInputsMap.values());
    // Grouped input start will be controlled by the start of the GroupedInput

    // Construct the set of groupedInputs up front so that start is not invoked on them.
    Set<String> groupInputs = Sets.newHashSet();
    // Construct Inputs/Outputs map argument for processor.run()
    // first add the group inputs
    if (groupInputSpecs != null && !groupInputSpecs.isEmpty()) {
        for (GroupInputSpec groupInputSpec : groupInputSpecs) {
            runInputMap.put(groupInputSpec.getGroupName(), groupInputsMap.get(groupInputSpec.getGroupName()));
            groupInputs.addAll(groupInputSpec.getGroupVertices());
        }
    }

    initialMemoryDistributor.makeInitialAllocations();

    LOG.info("Starting Inputs/Outputs");
    int numAutoStarts = 0;
    for (InputSpec inputSpec : inputSpecs) {
        if (groupInputs.contains(inputSpec.getSourceVertexName())) {
            LOG.info("Ignoring " + inputSpec.getSourceVertexName()
                    + " for start, since it will be controlled via it's Group");
            continue;
        }
        if (!inputAlreadyStarted(taskSpec.getVertexName(), inputSpec.getSourceVertexName())) {
            startedInputsMap.put(taskSpec.getVertexName(), inputSpec.getSourceVertexName());
            numAutoStarts++;
            this.initializerCompletionService.submit(new StartInputCallable(
                    inputsMap.get(inputSpec.getSourceVertexName()), inputSpec.getSourceVertexName()));
            LOG.info("Input: " + inputSpec.getSourceVertexName()
                    + " being auto started by the framework. Subsequent instances will not be auto-started");
        }
    }

    if (groupInputSpecs != null) {
        for (GroupInputSpec group : groupInputSpecs) {
            if (!inputAlreadyStarted(taskSpec.getVertexName(), group.getGroupName())) {
                numAutoStarts++;
                this.initializerCompletionService.submit(
                        new StartInputCallable(groupInputsMap.get(group.getGroupName()), group.getGroupName()));
                LOG.info("InputGroup: " + group.getGroupName()
                        + " being auto started by the framework. Subsequent instance will not be auto-started");
            }
        }
    }

    // Shutdown after all tasks complete.
    this.initializerExecutor.shutdown();

    completedTasks = 0;
    LOG.info("Num IOs determined for AutoStart: " + numAutoStarts);
    while (completedTasks < numAutoStarts) {
        LOG.info("Waiting for " + (numAutoStarts - completedTasks) + " IOs to start");
        Future<Void> future = initializerCompletionService.take();
        try {
            future.get();
            completedTasks++;
        } catch (ExecutionException e) {
            if (e.getCause() instanceof Exception) {
                throw (Exception) e.getCause();
            } else {
                throw new Exception(e);
            }
        }
    }
    LOG.info("AutoStartComplete");

    // then add the non-grouped inputs
    for (InputSpec inputSpec : inputSpecs) {
        if (!groupInputs.contains(inputSpec.getSourceVertexName())) {
            LogicalInput input = inputsMap.get(inputSpec.getSourceVertexName());
            runInputMap.put(inputSpec.getSourceVertexName(), input);
        }
    }

    for (OutputSpec outputSpec : outputSpecs) {
        LogicalOutput output = outputsMap.get(outputSpec.getDestinationVertexName());
        String outputName = outputSpec.getDestinationVertexName();
        runOutputMap.put(outputName, output);
    }

    // TODO Maybe close initialized inputs / outputs in case of failure to
    // initialize.

    startRouterThread();
}

From source file:org.geoserver.jdbcconfig.internal.ConfigDatabase.java
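
GeoServer loads catalog and configuration objects through a cache; when the cache loader fails, the cause of the ExecutionException is propagated via Guava's Throwables.propagate.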

@Nullable
public <T extends Info> T getById(final String id, final Class<T> type) {
    Assert.notNull(id, "id");

    Info info = null;
    try {
        final Callable<? extends Info> valueLoader;
        if (CatalogInfo.class.isAssignableFrom(type)) {
            valueLoader = new CatalogLoader(id);
        } else {
            valueLoader = new ConfigLoader(id);
        }

        info = cache.get(id, valueLoader);

    } catch (CacheLoader.InvalidCacheLoadException notFound) {
        return null;
    } catch (ExecutionException e) {
        Throwables.propagate(e.getCause());
    }

    if (info == null) {
        return null;
    }
    if (info instanceof CatalogInfo) {
        info = resolveCatalog((CatalogInfo) info);
    } else if (info instanceof ServiceInfo) {
        resolveTransient((ServiceInfo) info);
    }

    if (type.isAssignableFrom(info.getClass())) {
        // use ModificationProxy only in this case as the returned object is cached. saveInternal
        // follows suit, checking whether the object being saved is a mod proxy, but that's not
        // mandatory in this implementation and should only be the case when the object was
        // obtained by id
        return ModificationProxy.create(type.cast(info), type);
    }

    return null;
}

From source file:com.funambol.pushlistener.service.taskexecutor.ScheduledTaskExecutor.java
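
Funambol's scheduled task executor inspects a completed ScheduledFuture and uses getCause() to recover and log the exception thrown by the task.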

/**
 * Logs the exception, if any, thrown by the taskWrapper execution.
 * @param taskWrapper the executed task wrapper
 * @param scheduledFuture the scheduledFuture that may hold the exception
 */
private void logTaskWrapperExecutionError(TaskWrapper taskWrapper, ScheduledFuture scheduledFuture) {
    try {
        if (scheduledFuture.isDone()) {
            scheduledFuture.get();
        }
    } catch (InterruptedException ie) {
        // ignored: only the exception raised by the task itself is of interest here
    } catch (ExecutionException ee) {
        //
        // This is done to retrieve the possible exception thrown by the
        // task
        //
        Throwable realThrowable = ee.getCause();

        if (taskWrapper != null) {
            log.error("Task '" + taskWrapper + "' throws an uncaught exception. ", realThrowable);
        } else {
            log.error("Uncaught exception thrown by: " + scheduledFuture, realThrowable);
        }
    }
}

From source file:org.apache.storm.localizer.AsyncLocalizer.java
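
Storm's blob localizer downloads blobs in parallel and uses getCause() to rethrow typed exceptions (AuthorizationException, KeyNotFoundException), wrapping any other cause in an IOException.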

/**
 * This function either returns the blobs already present in the cache or, if they are not
 * cached, downloads them in parallel (up to SUPERVISOR_BLOBSTORE_DOWNLOAD_THREAD_COUNT)
 * and blocks until all of them have been downloaded.
 */
synchronized List<LocalizedResource> getBlobs(List<LocalResource> localResources, String user, String topo,
        File userFileDir) throws AuthorizationException, KeyNotFoundException, IOException {
    if ((boolean) conf.getOrDefault(Config.DISABLE_SYMLINKS, false)) {
        throw new KeyNotFoundException("symlinks are disabled so blobs cannot be downloaded.");
    }
    LocalizedResourceSet newSet = new LocalizedResourceSet(user);
    LocalizedResourceSet lrsrcSet = userRsrc.putIfAbsent(user, newSet);
    if (lrsrcSet == null) {
        lrsrcSet = newSet;
    }
    ArrayList<LocalizedResource> results = new ArrayList<>();
    ArrayList<Callable<LocalizedResource>> downloads = new ArrayList<>();

    ClientBlobStore blobstore = null;
    try {
        blobstore = getClientBlobStore();
        for (LocalResource localResource : localResources) {
            String key = localResource.getBlobName();
            boolean uncompress = localResource.shouldUncompress();
            LocalizedResource lrsrc = lrsrcSet.get(key, localResource.shouldUncompress());
            boolean isUpdate = false;
            if ((lrsrc != null) && (lrsrc.isUncompressed() == localResource.shouldUncompress())
                    && (isLocalizedResourceDownloaded(lrsrc))) {
                if (isLocalizedResourceUpToDate(lrsrc, blobstore)) {
                    LOG.debug("blob already exists: {}", key);
                    lrsrc.addReference(topo);
                    results.add(lrsrc);
                    continue;
                }
                LOG.debug("blob exists but isn't up to date: {}", key);
                isUpdate = true;
            }

            // go off to blobstore and get it
            // assume dir passed in exists and has correct permission
            LOG.debug("fetching blob: {}", key);
            File downloadDir = getCacheDirForFiles(userFileDir);
            File localFile = new File(downloadDir, key);
            if (uncompress) {
                // for compressed file, download to archives dir
                downloadDir = getCacheDirForArchives(userFileDir);
                localFile = new File(downloadDir, key);
            }
            downloadDir.mkdir();
            downloads.add(new DownloadBlob(this, conf, key, localFile, user, uncompress, isUpdate));
        }
    } finally {
        if (blobstore != null) {
            blobstore.shutdown();
        }
    }
    try {
        List<Future<LocalizedResource>> futures = execService.invokeAll(downloads);
        for (Future<LocalizedResource> futureRsrc : futures) {
            LocalizedResource lrsrc = futureRsrc.get();
            lrsrc.addReference(topo);
            lrsrcSet.add(lrsrc.getKey(), lrsrc, lrsrc.isUncompressed());
            results.add(lrsrc);
        }
    } catch (ExecutionException e) {
        if (e.getCause() instanceof AuthorizationException) {
            throw (AuthorizationException) e.getCause();
        } else if (e.getCause() instanceof KeyNotFoundException) {
            throw (KeyNotFoundException) e.getCause();
        } else {
            throw new IOException("Error getting blobs", e);
        }
    } catch (RejectedExecutionException re) {
        throw new IOException("RejectedExecutionException: ", re);
    } catch (InterruptedException ie) {
        throw new IOException("Interrupted Exception", ie);
    }
    return results;
}

From source file:net.yacy.cora.protocol.http.HTTPClient.java
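
YaCy runs an HTTP request in a FutureTask and, on failure, rethrows the cause of the ExecutionException from within a broader Throwable handler.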

private void execute(final HttpUriRequest httpUriRequest, final boolean concurrent) throws IOException {
    final HttpClientContext context = HttpClientContext.create();
    context.setRequestConfig(reqConfBuilder.build());
    if (this.host != null)
        context.setTargetHost(new HttpHost(this.host));

    setHeaders(httpUriRequest);
    // statistics
    storeConnectionInfo(httpUriRequest);
    // execute the method; some asserts confirm that the request can be sent with a Content-Length and is therefore not terminated by EOF
    if (httpUriRequest instanceof HttpEntityEnclosingRequest) {
        final HttpEntityEnclosingRequest hrequest = (HttpEntityEnclosingRequest) httpUriRequest;
        final HttpEntity entity = hrequest.getEntity();
        assert entity != null;
        //assert !entity.isChunked();
        //assert entity.getContentLength() >= 0;
        assert !hrequest.expectContinue();
    }

    final String initialThreadName = Thread.currentThread().getName();
    Thread.currentThread().setName("HTTPClient-" + httpUriRequest.getURI());
    final long time = System.currentTimeMillis();
    try {

        if (concurrent) {
            FutureTask<CloseableHttpResponse> t = new FutureTask<CloseableHttpResponse>(
                    new Callable<CloseableHttpResponse>() {
                        @Override
                        public CloseableHttpResponse call() throws ClientProtocolException, IOException {
                            final CloseableHttpClient client = clientBuilder.build();
                            CloseableHttpResponse response = client.execute(httpUriRequest, context);
                            return response;
                        }
                    });
            executor.execute(t);
            try {
                this.httpResponse = t.get(this.timeout, TimeUnit.MILLISECONDS);
            } catch (ExecutionException e) {
                throw e.getCause();
            } catch (Throwable e) {
                // ignored: a missing response is handled by the null check below
            }
            try {
                t.cancel(true);
            } catch (Throwable e) {
            }
            if (this.httpResponse == null)
                throw new IOException("timout to client after " + this.timeout + "ms" + " for url "
                        + httpUriRequest.getURI().toString());
        } else {
            final CloseableHttpClient client = clientBuilder.build();
            this.httpResponse = client.execute(httpUriRequest, context);
        }
        this.httpResponse.setHeader(HeaderFramework.RESPONSE_TIME_MILLIS,
                Long.toString(System.currentTimeMillis() - time));
    } catch (final Throwable e) {
        ConnectionInfo.removeConnection(httpUriRequest.hashCode());
        httpUriRequest.abort();
        if (this.httpResponse != null)
            this.httpResponse.close();
        //e.printStackTrace();
        throw new IOException(
                "Client can't execute: " + (e.getCause() == null ? e.getMessage() : e.getCause().getMessage())
                        + " duration=" + Long.toString(System.currentTimeMillis() - time) + " for url "
                        + httpUriRequest.getURI().toString());
    } finally {
        /* Restore the thread initial name */
        Thread.currentThread().setName(initialThreadName);
    }
}

From source file:org.apache.solr.request.SimpleFacets.java
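
Solr computes facet counts on an executor and unwraps the cause of an ExecutionException, rethrowing RuntimeExceptions directly and wrapping anything else in a SolrException.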

/**
 * Returns a list of value constraints and the associated facet counts 
 * for each facet field specified in the params.
 *
 * @see FacetParams#FACET_FIELD
 * @see #getFieldMissingCount
 * @see #getFacetTermEnumCounts
 */
@SuppressWarnings("unchecked")
public NamedList<Object> getFacetFieldCounts() throws IOException, SyntaxError {

    NamedList<Object> res = new SimpleOrderedMap<>();
    String[] facetFs = global.getParams(FacetParams.FACET_FIELD);
    if (null == facetFs) {
        return res;
    }

    // Passing a negative number for FACET_THREADS implies an unlimited number of threads is acceptable.
    // Also, a subtlety of directExecutor is that no matter how many times you "submit" a job, it's really
    // just a method call in that it's run by the calling thread.
    int maxThreads = req.getParams().getInt(FacetParams.FACET_THREADS, 0);
    Executor executor = maxThreads == 0 ? directExecutor : facetExecutor;
    final Semaphore semaphore = new Semaphore((maxThreads <= 0) ? Integer.MAX_VALUE : maxThreads);
    List<Future<NamedList>> futures = new ArrayList<>(facetFs.length);

    if (fdebugParent != null) {
        fdebugParent.putInfoItem("maxThreads", maxThreads);
    }

    try {
        //Loop over fields; submit to executor, keeping the future
        for (String f : facetFs) {
            if (fdebugParent != null) {
                fdebug = new FacetDebugInfo();
                fdebugParent.addChild(fdebug);
            }
            final ParsedParams parsed = parseParams(FacetParams.FACET_FIELD, f);
            final SolrParams localParams = parsed.localParams;
            final String termList = localParams == null ? null : localParams.get(CommonParams.TERMS);
            final String key = parsed.key;
            final String facetValue = parsed.facetValue;
            Callable<NamedList> callable = () -> {
                try {
                    NamedList<Object> result = new SimpleOrderedMap<>();
                    if (termList != null) {
                        List<String> terms = StrUtils.splitSmart(termList, ",", true);
                        result.add(key, getListedTermCounts(facetValue, parsed, terms));
                    } else {
                        result.add(key, getTermCounts(facetValue, parsed));
                    }
                    return result;
                } catch (SolrException se) {
                    throw se;
                } catch (Exception e) {
                    throw new SolrException(ErrorCode.SERVER_ERROR,
                            "Exception during facet.field: " + facetValue, e);
                } finally {
                    semaphore.release();
                }
            };

            RunnableFuture<NamedList> runnableFuture = new FutureTask<>(callable);
            semaphore.acquire();//may block and/or interrupt
            executor.execute(runnableFuture);//releases semaphore when done
            futures.add(runnableFuture);
        } //facetFs loop

        //Loop over futures to get the values. The order is the same as facetFs but shouldn't matter.
        for (Future<NamedList> future : futures) {
            res.addAll(future.get());
        }
        assert semaphore.availablePermits() >= maxThreads;
    } catch (InterruptedException e) {
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
                "Error while processing facet fields: InterruptedException", e);
    } catch (ExecutionException ee) {
        Throwable e = ee.getCause();//unwrap
        if (e instanceof RuntimeException) {
            throw (RuntimeException) e;
        }
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
                "Error while processing facet fields: " + e.toString(), e);
    }

    return res;
}

From source file:com.palantir.atlasdb.keyvalue.cassandra.CQLKeyValueService.java
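
AtlasDB's CQL key-value service fetches prepared statements from a cache; on failure it rethrows Errors as-is and converts any other cause to an unchecked exception.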

PreparedStatement getPreparedStatement(String tableName, String query, Session sessionToBeUsed) {
    try {
        PreparedStatement statement;

        if (sessionToBeUsed == longRunningQuerySession) {
            statement = cqlStatementCache.LONG_RUNNING_QUERY.get(query).enableTracing();
        } else {
            statement = cqlStatementCache.NORMAL_QUERY.get(query).enableTracing();
        }

        if (shouldTraceQuery(tableName)) {
            statement.enableTracing();
        } else {
            statement.disableTracing();
        }

        return statement;
    } catch (ExecutionException e) {
        Throwables.throwIfInstance(e, Error.class);
        throw Throwables.throwUncheckedException(e.getCause());
    }
}

From source file:org.apache.storm.localizer.AsyncLocalizer.java
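
The same Storm localizer updates blobs asynchronously, logging each ExecutionException and rethrowing known causes by type.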

/**
 * This function updates blobs on the supervisor. It uses a separate thread pool and runs
 * asynchronously from the download and delete operations.
 */
List<LocalizedResource> updateBlobs(List<LocalResource> localResources, String user)
        throws AuthorizationException, KeyNotFoundException, IOException {
    LocalizedResourceSet lrsrcSet = userRsrc.get(user);
    ArrayList<LocalizedResource> results = new ArrayList<>();
    ArrayList<Callable<LocalizedResource>> updates = new ArrayList<>();

    if (lrsrcSet == null) {
        // resource set must have been removed
        return results;
    }
    ClientBlobStore blobstore = null;
    try {
        blobstore = getClientBlobStore();
        for (LocalResource localResource : localResources) {
            String key = localResource.getBlobName();
            LocalizedResource lrsrc = lrsrcSet.get(key, localResource.shouldUncompress());
            if (lrsrc == null) {
                LOG.warn("blob requested for update doesn't exist: {}", key);
            } else if ((boolean) conf.getOrDefault(Config.DISABLE_SYMLINKS, false)) {
                LOG.warn("symlinks are disabled so blobs cannot be downloaded.");
            } else {
                // update it if either the version isn't the latest or if any local blob files are missing
                if (!isLocalizedResourceUpToDate(lrsrc, blobstore) || !isLocalizedResourceDownloaded(lrsrc)) {
                    LOG.debug("updating blob: {}", key);
                    updates.add(new DownloadBlob(this, conf, key, new File(lrsrc.getFilePath()), user,
                            lrsrc.isUncompressed(), true));
                }
            }
        }
    } finally {
        if (blobstore != null) {
            blobstore.shutdown();
        }
    }
    try {
        List<Future<LocalizedResource>> futures = execService.invokeAll(updates);
        for (Future<LocalizedResource> futureRsrc : futures) {
            try {
                LocalizedResource lrsrc = futureRsrc.get();
                // put the resource back, just in case it was removed at the same time by the cleaner
                LocalizedResourceSet newSet = new LocalizedResourceSet(user);
                LocalizedResourceSet newlrsrcSet = userRsrc.putIfAbsent(user, newSet);
                if (newlrsrcSet == null) {
                    newlrsrcSet = newSet;
                }
                newlrsrcSet.putIfAbsent(lrsrc.getKey(), lrsrc, lrsrc.isUncompressed());
                results.add(lrsrc);
            } catch (ExecutionException e) {
                LOG.error("Error updating blob: ", e);
                if (e.getCause() instanceof AuthorizationException) {
                    throw (AuthorizationException) e.getCause();
                }
                if (e.getCause() instanceof KeyNotFoundException) {
                    throw (KeyNotFoundException) e.getCause();
                }
            }
        }
    } catch (RejectedExecutionException re) {
        LOG.error("Error updating blobs : ", re);
    } catch (InterruptedException ie) {
        throw new IOException("Interrupted Exception", ie);
    }
    return results;
}

From source file:org.apache.hadoop.hbase.tool.LoadIncrementalHFiles.java
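
HBase's bulk loader groups and splits HFiles in parallel; the cause of an ExecutionException is rethrown when it is an IOException and otherwise wrapped in an IllegalStateException.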

/**
 * @param table the table to load into
 * @param pool the ExecutorService
 * @param queue the queue for LoadQueueItem
 * @param startEndKeys start and end keys
 * @return A map that groups LQI by likely bulk load region targets and Set of missing hfiles.
 */
private Pair<Multimap<ByteBuffer, LoadQueueItem>, Set<String>> groupOrSplitPhase(final Table table,
        ExecutorService pool, Deque<LoadQueueItem> queue, final Pair<byte[][], byte[][]> startEndKeys)
        throws IOException {
    // <region start key, LQI> needs to be synchronized only within the scope of this
    // phase because of the puts that happen in futures.
    Multimap<ByteBuffer, LoadQueueItem> rgs = HashMultimap.create();
    final Multimap<ByteBuffer, LoadQueueItem> regionGroups = Multimaps.synchronizedMultimap(rgs);
    Set<String> missingHFiles = new HashSet<>();
    Pair<Multimap<ByteBuffer, LoadQueueItem>, Set<String>> pair = new Pair<>(regionGroups, missingHFiles);

    // drain LQIs and figure out bulk load groups
    Set<Future<Pair<List<LoadQueueItem>, String>>> splittingFutures = new HashSet<>();
    while (!queue.isEmpty()) {
        final LoadQueueItem item = queue.remove();

        final Callable<Pair<List<LoadQueueItem>, String>> call = new Callable<Pair<List<LoadQueueItem>, String>>() {
            @Override
            public Pair<List<LoadQueueItem>, String> call() throws Exception {
                Pair<List<LoadQueueItem>, String> splits = groupOrSplit(regionGroups, item, table,
                        startEndKeys);
                return splits;
            }
        };
        splittingFutures.add(pool.submit(call));
    }
    // get all the results. All grouping and splitting must finish before
    // we can attempt the atomic loads.
    for (Future<Pair<List<LoadQueueItem>, String>> lqis : splittingFutures) {
        try {
            Pair<List<LoadQueueItem>, String> splits = lqis.get();
            if (splits != null) {
                if (splits.getFirst() != null) {
                    queue.addAll(splits.getFirst());
                } else {
                    missingHFiles.add(splits.getSecond());
                }
            }
        } catch (ExecutionException e1) {
            Throwable t = e1.getCause();
            if (t instanceof IOException) {
                LOG.error("IOException during splitting", e1);
                throw (IOException) t; // would have been thrown if not parallelized
            }
            LOG.error("Unexpected execution exception during splitting", e1);
            throw new IllegalStateException(t);
        } catch (InterruptedException e1) {
            LOG.error("Unexpected interrupted exception during splitting", e1);
            throw (InterruptedIOException) new InterruptedIOException().initCause(e1);
        }
    }
    return pair;
}