Example usage for java.util.concurrent ExecutorService isShutdown

Introduction

On this page you can find example usages of java.util.concurrent ExecutorService isShutdown.

Prototype

boolean isShutdown();

Documentation

Returns true if this executor has been shut down.
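
As a minimal, self-contained sketch of that contract (the class and pool names below are illustrative, not taken from any of the examples that follow): isShutdown() becomes true as soon as shutdown() or shutdownNow() has been called, even while previously submitted tasks are still running, whereas isTerminated() only becomes true once those tasks have finished.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class IsShutdownDemo {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        pool.submit(() -> {
            try {
                Thread.sleep(500); // simulate in-flight work
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });

        System.out.println(pool.isShutdown());   // false: shutdown has not been requested yet
        pool.shutdown();                          // stop accepting new tasks
        System.out.println(pool.isShutdown());   // true, even though the submitted task may still be running
        System.out.println(pool.isTerminated()); // most likely false until that task completes

        pool.awaitTermination(5, TimeUnit.SECONDS);
        System.out.println(pool.isTerminated()); // true once all tasks have completed
    }
}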

Usage

From source file:org.apache.phoenix.hbase.index.write.TestIndexWriter.java

/**
 * With the move to using a pool of threads to write, we need to ensure that we still block until
 * all index writes for a mutation/batch are completed.
 * @throws Exception on failure
 */
@SuppressWarnings({ "unchecked", "deprecation" })
@Test
public void testSynchronouslyCompletesAllWrites() throws Exception {
    LOG.info("Starting " + testName.getTableNameString());
    LOG.info("Current thread is interrupted: " + Thread.interrupted());
    Abortable abort = new StubAbortable();
    Stoppable stop = Mockito.mock(Stoppable.class);
    ExecutorService exec = Executors.newFixedThreadPool(1);
    Map<ImmutableBytesPtr, HTableInterface> tables = new HashMap<ImmutableBytesPtr, HTableInterface>();
    FakeTableFactory factory = new FakeTableFactory(tables);

    byte[] tableName = this.testName.getTableName();
    Put m = new Put(row);
    m.add(Bytes.toBytes("family"), Bytes.toBytes("qual"), null);
    Collection<Pair<Mutation, byte[]>> indexUpdates = Arrays.asList(new Pair<Mutation, byte[]>(m, tableName));

    HTableInterface table = Mockito.mock(HTableInterface.class);
    final boolean[] completed = new boolean[] { false };
    Mockito.when(table.batch(Mockito.anyList())).thenAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            // just keep track that it was called
            completed[0] = true;
            return null;
        }
    });
    Mockito.when(table.getTableName()).thenReturn(testName.getTableName());
    // add the table to the set of tables, so it's returned to the writer
    tables.put(new ImmutableBytesPtr(tableName), table);

    // setup the writer and failure policy
    ParallelWriterIndexCommitter committer = new ParallelWriterIndexCommitter(VersionInfo.getVersion());
    committer.setup(factory, exec, abort, stop, 2);
    KillServerOnFailurePolicy policy = new KillServerOnFailurePolicy();
    policy.setup(stop, abort);
    IndexWriter writer = new IndexWriter(committer, policy);
    writer.write(indexUpdates);
    assertTrue("Writer returned before the table batch completed! Likely a race condition tripped",
            completed[0]);
    writer.stop(this.testName.getTableNameString() + " finished");
    assertTrue("Factory didn't get shutdown after writer#stop!", factory.shutdown);
    assertTrue("ExecutorService isn't shut down after writer#stop!", exec.isShutdown());
}

From source file:org.acmsl.queryj.api.handlers.AbstractTemplateWritingHandler.java

/**
 * Writes the templates.
 * @param templates the templates.
 * @param engineName the engine name.
 * @param parameters the parameters.
 * @param charset the file encoding.
 * @param templateGenerator the template generator.
 * @param threadCount the number of threads to use.
 * @param rootDir the root dir.
 * @return the futures for the concurrent threads.
 * @throws QueryJBuildException if the templates cannot be written.
 */
@NotNull
@SuppressWarnings("unused")
protected List<Future<?>> writeTemplatesMultithread2ndVersion(@Nullable final List<T> templates,
        @NotNull final String engineName, @NotNull final QueryJCommand parameters,
        @NotNull final Charset charset, @NotNull final TG templateGenerator, final int threadCount,
        @NotNull final File rootDir) throws QueryJBuildException {
    @NotNull
    final List<Future<?>> result;

    if (templates != null) {
        result = new ArrayList<>(templates.size());

        @NotNull
        final ExecutorService threadPool = Executors.newFixedThreadPool(threadCount);

        @NotNull
        final CyclicBarrier round = new CyclicBarrier(threadCount);

        @NotNull
        AtomicInteger index = new AtomicInteger(0);

        int intIndex;

        @Nullable
        final Log t_Log = UniqueLogFactory.getLog(AbstractTemplateWritingHandler.class);

        for (@Nullable
        final T t_Template : templates) {
            if (t_Template != null) {
                intIndex = index.incrementAndGet();

                if (intIndex <= threadCount) {
                    if (t_Log != null) {
                        t_Log.info("Starting a new thread " + intIndex + "/" + threadCount);
                    }

                    result.add(threadPool.submit((Runnable) buildGeneratorThread(t_Template, templateGenerator,
                            retrieveOutputDir(t_Template.getTemplateContext(), rootDir, parameters), rootDir,
                            charset, intIndex, round, parameters)));
                } else {
                    if (t_Log != null) {
                        t_Log.info("No threads available " + intIndex + "/" + threadCount);
                    }

                    index = new AtomicInteger(0);

                    try {
                        round.await();
                    } catch (@NotNull final InterruptedException interrupted) {
                        if (t_Log != null) {
                            t_Log.info("Thread pool interrupted while waiting", interrupted);
                        }
                    } catch (@NotNull final BrokenBarrierException brokenBarrier) {
                        if (t_Log != null) {
                            t_Log.info(BROKEN_BARRIER_LITERAL, brokenBarrier);
                        }
                    }

                    if (t_Log != null) {
                        t_Log.info("Resetting thread pool (shutdown? " + threadPool.isShutdown() + ")");
                    }

                    round.reset();
                }
            }
        }
    } else {
        result = new ArrayList<>(0);
    }

    return result;
}

From source file:org.apache.solr.schema.ManagedIndexSchema.java

/**
 * Block up to a specified maximum time until we see agreement on the schema
 * version in ZooKeeper across all replicas for a collection.
 */
public static void waitForSchemaZkVersionAgreement(String collection, String localCoreNodeName,
        int schemaZkVersion, ZkController zkController, int maxWaitSecs) {
    RTimer timer = new RTimer();

    // get a list of active replica cores to query for the schema zk version (skipping this core of course)
    List<GetZkSchemaVersionCallable> concurrentTasks = new ArrayList<>();
    for (String coreUrl : getActiveReplicaCoreUrls(zkController, collection, localCoreNodeName))
        concurrentTasks.add(new GetZkSchemaVersionCallable(coreUrl, schemaZkVersion));
    if (concurrentTasks.isEmpty())
        return; // nothing to wait for ...

    log.info("Waiting up to " + maxWaitSecs + " secs for " + concurrentTasks.size()
            + " replicas to apply schema update version " + schemaZkVersion + " for collection " + collection);

    // use an executor service to invoke schema zk version requests in parallel with a max wait time
    int poolSize = Math.min(concurrentTasks.size(), 10);
    ExecutorService parallelExecutor = ExecutorUtil.newMDCAwareFixedThreadPool(poolSize,
            new DefaultSolrThreadFactory("managedSchemaExecutor"));
    try {
        List<Future<Integer>> results = parallelExecutor.invokeAll(concurrentTasks, maxWaitSecs,
                TimeUnit.SECONDS);

        // determine whether all replicas have the update
        List<String> failedList = null; // lazily init'd
        for (int f = 0; f < results.size(); f++) {
            int vers = -1;
            Future<Integer> next = results.get(f);
            if (next.isDone() && !next.isCancelled()) {
                // looks to have finished, but need to check the version value too
                try {
                    vers = next.get();
                } catch (ExecutionException e) {
                    // shouldn't happen since we checked isCancelled
                }
            }

            if (vers == -1) {
                String coreUrl = concurrentTasks.get(f).coreUrl;
                log.warn("Core " + coreUrl + " version mismatch! Expected " + schemaZkVersion + " but got "
                        + vers);
                if (failedList == null)
                    failedList = new ArrayList<>();
                failedList.add(coreUrl);
            }
        }

        // if any tasks haven't completed within the specified timeout, it's an error
        if (failedList != null)
            throw new SolrException(ErrorCode.SERVER_ERROR,
                    failedList.size() + " out of " + (concurrentTasks.size() + 1)
                            + " replicas failed to update their schema to version " + schemaZkVersion
                            + " within " + maxWaitSecs + " seconds! Failed cores: " + failedList);

    } catch (InterruptedException ie) {
        log.warn("Core " + localCoreNodeName + " was interrupted waiting for schema version " + schemaZkVersion
                + " to propagate to " + concurrentTasks.size() + " replicas for collection " + collection);

        Thread.currentThread().interrupt();
    } finally {
        if (!parallelExecutor.isShutdown())
            parallelExecutor.shutdown();
    }

    log.info("Took {}ms for {} replicas to apply schema update version {} for collection {}", timer.getTime(),
            concurrentTasks.size(), schemaZkVersion, collection);
}

From source file:com.atlauncher.data.Settings.java

/**
 * Loads info about the different Minecraft versions
 */
private void loadMinecraftVersions() {
    LogManager.debug("Loading Minecraft versions");
    this.minecraftVersions = new HashMap<String, MinecraftVersion>();
    List<MinecraftVersion> list = new ArrayList<MinecraftVersion>();
    try {
        java.lang.reflect.Type type = new TypeToken<List<MinecraftVersion>>() {
        }.getType();
        list = Gsons.DEFAULT.fromJson(new FileReader(new File(getJSONDir(), "minecraftversions.json")), type);
    } catch (JsonSyntaxException e) {
        logStackTrace(e);
    } catch (JsonIOException e) {
        logStackTrace(e);
    } catch (FileNotFoundException e) {
        logStackTrace(e);
    }
    if (list == null) {
        LogManager.error("Error loading Minecraft Versions. List was null. Exiting!");
        System.exit(1); // Cannot recover from this so exit
    }
    for (MinecraftVersion mv : list) {
        this.minecraftVersions.put(mv.getVersion(), mv);
    }
    LogManager.info("[Background] Checking Minecraft Versions Started");
    ExecutorService executor = Executors.newFixedThreadPool(this.concurrentConnections);
    for (final Entry<String, MinecraftVersion> entry : this.minecraftVersions.entrySet()) {
        executor.execute(new Runnable() {
            @Override
            public void run() {
                entry.getValue().loadVersion();
            }
        });
    }
    executor.execute(new Runnable() {
        @Override
        public void run() {
            LogManager.info("[Background] Checking Minecraft Versions Complete");
        }
    });
    executor.shutdown();
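    // Note: isShutdown() is true as soon as shutdown() has been called, so this busy-wait
    // exits immediately; waiting for the queued version checks to finish would require
    // isTerminated() or awaitTermination() instead.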
    while (!executor.isShutdown()) {
    }
    LogManager.debug("Finished loading Minecraft versions");
}
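
If the intent is to block until the queued version checks have actually finished, a hedged alternative to the busy-wait above (pool size and timeout are illustrative) is to pair shutdown() with awaitTermination():

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class WaitForCompletion {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newFixedThreadPool(8);
        // submit the background checks here ...
        executor.shutdown(); // no new tasks accepted; isShutdown() is now true
        // block until everything submitted before shutdown() has run, or the timeout expires
        if (!executor.awaitTermination(10, TimeUnit.MINUTES)) {
            executor.shutdownNow(); // give up and cancel whatever is still queued or running
        }
    }
}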

From source file:org.apache.hadoop.hbase.index.write.TestIndexWriter.java

/**
 * Index updates can potentially be queued up if there aren't enough writer threads. If a running
 * index write fails, then the pending index update should exit early when it comes up (if the
 * pool isn't already shut down).
 * <p>
 * This test is a little bit racy - the first task could actually fail before the third task is
 * even submitted. However, we should never see the third task attempt to make the batch write,
 * so we should never see a failure here.
 * @throws Exception on failure
 */
@SuppressWarnings("unchecked")
@Test
public void testFailureOnRunningUpdateAbortsPending() throws Exception {
    Abortable abort = new StubAbortable();
    Stoppable stop = Mockito.mock(Stoppable.class);
    // single thread factory so the older request gets queued
    ExecutorService exec = Executors.newFixedThreadPool(1);
    Map<ImmutableBytesPtr, HTableInterface> tables = new HashMap<ImmutableBytesPtr, HTableInterface>();
    FakeTableFactory factory = new FakeTableFactory(tables);

    // updates to two different tables
    byte[] tableName = Bytes.add(this.testName.getTableName(), new byte[] { 1, 2, 3, 4 });
    Put m = new Put(row);
    m.add(Bytes.toBytes("family"), Bytes.toBytes("qual"), null);
    byte[] tableName2 = this.testName.getTableName();// this will sort after the first tablename
    List<Pair<Mutation, byte[]>> indexUpdates = new ArrayList<Pair<Mutation, byte[]>>();
    indexUpdates.add(new Pair<Mutation, byte[]>(m, tableName));
    indexUpdates.add(new Pair<Mutation, byte[]>(m, tableName2));
    indexUpdates.add(new Pair<Mutation, byte[]>(m, tableName2));

    // first table will fail
    HTableInterface table = Mockito.mock(HTableInterface.class);
    Mockito.when(table.batch(Mockito.anyList()))
            .thenThrow(new IOException("Intentional IOException for failed first write."));
    Mockito.when(table.getTableName()).thenReturn(tableName);

    // second table just blocks to make sure that the abort propagates to the third task
    final CountDownLatch waitOnAbortedLatch = new CountDownLatch(1);
    final boolean[] failed = new boolean[] { false };
    HTableInterface table2 = Mockito.mock(HTableInterface.class);
    Mockito.when(table2.getTableName()).thenReturn(tableName2);
    Mockito.when(table2.batch(Mockito.anyList())).thenAnswer(new Answer<Void>() {
        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            waitOnAbortedLatch.await();
            return null;
        }
    }).thenAnswer(new Answer<Void>() {
        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            failed[0] = true;
            throw new RuntimeException(
                    "Unexpected exception - second index table shouldn't have been written to");
        }
    });

    // add the tables to the set of tables, so they're returned to the writer
    tables.put(new ImmutableBytesPtr(tableName), table);
    tables.put(new ImmutableBytesPtr(tableName2), table2);

    ParallelWriterIndexCommitter committer = new ParallelWriterIndexCommitter();
    committer.setup(factory, exec, abort, stop, 2);
    KillServerOnFailurePolicy policy = new KillServerOnFailurePolicy();
    policy.setup(stop, abort);
    IndexWriter writer = new IndexWriter(committer, policy);
    try {
        writer.write(indexUpdates);
        fail("Should not have successfully completed all index writes");
    } catch (SingleIndexWriteFailureException e) {
        LOG.info("Correctly got a failure to reach the index", e);
        // should have correctly gotten the correct abort, so let the next task execute
        waitOnAbortedLatch.countDown();
    }
    assertFalse("Third set of index writes should never have been attempted - the abort should have been seen first!",
            failed[0]);
    writer.stop(this.testName.getTableNameString() + " finished");
    assertTrue("Factory didn't get shutdown after writer#stop!", factory.shutdown);
    assertTrue("ExecutorService isn't shut down after writer#stop!", exec.isShutdown());
}

From source file:org.apache.phoenix.hbase.index.write.TestIndexWriter.java

/**
 * Index updates can potentially be queued up if there aren't enough writer threads. If a running
 * index write fails, then the pending index update should exit early when it comes up (if the
 * pool isn't already shut down).
 * <p>
 * This test is a little bit racy - the first task could actually fail before the third task is
 * even submitted. However, we should never see the third task attempt to make the batch write,
 * so we should never see a failure here.
 * @throws Exception on failure
 */
@SuppressWarnings({ "unchecked", "deprecation" })
@Test
public void testFailureOnRunningUpdateAbortsPending() throws Exception {
    Abortable abort = new StubAbortable();
    Stoppable stop = Mockito.mock(Stoppable.class);
    // single thread factory so the older request gets queued
    ExecutorService exec = Executors.newFixedThreadPool(3);
    Map<ImmutableBytesPtr, HTableInterface> tables = new HashMap<ImmutableBytesPtr, HTableInterface>();
    FakeTableFactory factory = new FakeTableFactory(tables);

    // updates to two different tables
    byte[] tableName = Bytes.add(this.testName.getTableName(), new byte[] { 1, 2, 3, 4 });
    Put m = new Put(row);
    m.add(Bytes.toBytes("family"), Bytes.toBytes("qual"), null);
    byte[] tableName2 = this.testName.getTableName();// this will sort after the first tablename
    List<Pair<Mutation, byte[]>> indexUpdates = new ArrayList<Pair<Mutation, byte[]>>();
    indexUpdates.add(new Pair<Mutation, byte[]>(m, tableName));
    indexUpdates.add(new Pair<Mutation, byte[]>(m, tableName2));
    indexUpdates.add(new Pair<Mutation, byte[]>(m, tableName2));

    // first table will fail
    HTableInterface table = Mockito.mock(HTableInterface.class);
    Mockito.when(table.batch(Mockito.anyList()))
            .thenThrow(new IOException("Intentional IOException for failed first write."));
    Mockito.when(table.getTableName()).thenReturn(tableName);

    // second table just blocks to make sure that the abort propagates to the third task
    final CountDownLatch waitOnAbortedLatch = new CountDownLatch(1);
    final boolean[] failed = new boolean[] { false };
    HTableInterface table2 = Mockito.mock(HTableInterface.class);
    Mockito.when(table2.getTableName()).thenReturn(tableName2);
    Mockito.when(table2.batch(Mockito.anyList())).thenAnswer(new Answer<Void>() {
        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            waitOnAbortedLatch.await();
            return null;
        }
    }).thenAnswer(new Answer<Void>() {
        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            failed[0] = true;
            throw new RuntimeException(
                    "Unexpected exception - second index table shouldn't have been written to");
        }
    });

    // add the tables to the set of tables, so they're returned to the writer
    tables.put(new ImmutableBytesPtr(tableName), table);
    tables.put(new ImmutableBytesPtr(tableName2), table2);

    ParallelWriterIndexCommitter committer = new ParallelWriterIndexCommitter(VersionInfo.getVersion());
    committer.setup(factory, exec, abort, stop, 2);
    KillServerOnFailurePolicy policy = new KillServerOnFailurePolicy();
    policy.setup(stop, abort);
    IndexWriter writer = new IndexWriter(committer, policy);
    try {
        writer.write(indexUpdates);
        fail("Should not have successfully completed all index writes");
    } catch (SingleIndexWriteFailureException e) {
        LOG.info("Correctly got a failure to reach the index", e);
        // should have correctly gotten the correct abort, so let the next task execute
        waitOnAbortedLatch.countDown();
    }
    assertFalse("Third set of index writes should never have been attempted - the abort should have been seen first!",
            failed[0]);
    writer.stop(this.testName.getTableNameString() + " finished");
    assertTrue("Factory didn't get shutdown after writer#stop!", factory.shutdown);
    assertTrue("ExecutorService isn't shut down after writer#stop!", exec.isShutdown());
}

From source file:org.opencb.opencga.storage.hadoop.variant.HadoopVariantStorageEngine.java

@Override
public List<StoragePipelineResult> index(List<URI> inputFiles, URI outdirUri, boolean doExtract,
        boolean doTransform, boolean doLoad) throws StorageEngineException {

    if (inputFiles.size() == 1 || !doLoad) {
        return super.index(inputFiles, outdirUri, doExtract, doTransform, doLoad);
    }

    final boolean doArchive;
    final boolean doMerge;

    if (!getOptions().containsKey(HADOOP_LOAD_ARCHIVE) && !getOptions().containsKey(HADOOP_LOAD_VARIANT)) {
        doArchive = true;
        doMerge = true;
    } else {
        doArchive = getOptions().getBoolean(HADOOP_LOAD_ARCHIVE, false);
        doMerge = getOptions().getBoolean(HADOOP_LOAD_VARIANT, false);
    }

    if (!doArchive && !doMerge) {
        return Collections.emptyList();
    }

    final int nThreadArchive = getOptions().getInt(HADOOP_LOAD_ARCHIVE_BATCH_SIZE, 2);
    ObjectMap extraOptions = new ObjectMap().append(HADOOP_LOAD_ARCHIVE, true).append(HADOOP_LOAD_VARIANT,
            false);

    final List<StoragePipelineResult> concurrResult = new CopyOnWriteArrayList<>();
    List<VariantStoragePipeline> etlList = new ArrayList<>();
    ExecutorService executorService = Executors.newFixedThreadPool(nThreadArchive, r -> {
        Thread t = new Thread(r);
        t.setDaemon(true);
        return t;
    }); // Set Daemon for quick shutdown !!!
    LinkedList<Future<StoragePipelineResult>> futures = new LinkedList<>();
    List<Integer> indexedFiles = new CopyOnWriteArrayList<>();
    for (URI inputFile : inputFiles) {
        //Provide a connected storageETL if load is required.

        VariantStoragePipeline storageETL = newStorageETL(doLoad, new ObjectMap(extraOptions));
        futures.add(executorService.submit(() -> {
            try {
                Thread.currentThread().setName(Paths.get(inputFile).getFileName().toString());
                StoragePipelineResult storagePipelineResult = new StoragePipelineResult(inputFile);
                URI nextUri = inputFile;
                boolean error = false;
                if (doTransform) {
                    try {
                        nextUri = transformFile(storageETL, storagePipelineResult, concurrResult, nextUri,
                                outdirUri);

                    } catch (StoragePipelineException ignore) {
                        //Ignore here. Errors are stored in the ETLResult
                        error = true;
                    }
                }

                if (doLoad && doArchive && !error) {
                    try {
                        loadFile(storageETL, storagePipelineResult, concurrResult, nextUri, outdirUri);
                    } catch (StoragePipelineException ignore) {
                        //Ignore here. Errors are stored in the ETLResult
                        error = true;
                    }
                }
                if (doLoad && !error) {
                    // Read the VariantSource to get the original fileName (it may be different from the
                    // nextUri.getFileName if this is the transformed file)
                    String fileName = storageETL.readVariantSource(nextUri, null).getFileName();
                    // Get latest study configuration from DB, might have been changed since
                    StudyConfiguration studyConfiguration = storageETL.getStudyConfiguration();
                    // Get file ID for the provided file name
                    Integer fileId = studyConfiguration.getFileIds().get(fileName);
                    indexedFiles.add(fileId);
                }
                return storagePipelineResult;
            } finally {
                try {
                    storageETL.close();
                } catch (StorageEngineException e) {
                    logger.error("Issue closing DB connection ", e);
                }
            }
        }));
    }

    executorService.shutdown();

    int errors = 0;
    try {
        while (!futures.isEmpty()) {
            executorService.awaitTermination(1, TimeUnit.MINUTES);
            // Check values
            if (futures.peek().isDone() || futures.peek().isCancelled()) {
                Future<StoragePipelineResult> first = futures.pop();
                StoragePipelineResult result = first.get(1, TimeUnit.MINUTES);
                if (result.getTransformError() != null) {
                    //TODO: Handle errors. Retry?
                    errors++;
                    result.getTransformError().printStackTrace();
                } else if (result.getLoadError() != null) {
                    //TODO: Handle errors. Retry?
                    errors++;
                    result.getLoadError().printStackTrace();
                }
                concurrResult.add(result);
            }
        }
        if (errors > 0) {
            throw new StoragePipelineException("Errors found", concurrResult);
        }

        if (doLoad && doMerge) {
            int batchMergeSize = getOptions().getInt(HADOOP_LOAD_VARIANT_BATCH_SIZE, 10);
            // Overwrite default ID list with user provided IDs
            List<Integer> pendingFiles = indexedFiles;
            if (getOptions().containsKey(HADOOP_LOAD_VARIANT_PENDING_FILES)) {
                List<Integer> idList = getOptions().getAsIntegerList(HADOOP_LOAD_VARIANT_PENDING_FILES);
                if (!idList.isEmpty()) {
                    // only if the list is not empty
                    pendingFiles = idList;
                }
            }

            List<Integer> filesToMerge = new ArrayList<>(batchMergeSize);
            int i = 0;
            for (Iterator<Integer> iterator = pendingFiles.iterator(); iterator.hasNext(); i++) {
                Integer indexedFile = iterator.next();
                filesToMerge.add(indexedFile);
                if (filesToMerge.size() == batchMergeSize || !iterator.hasNext()) {
                    extraOptions = new ObjectMap().append(HADOOP_LOAD_ARCHIVE, false)
                            .append(HADOOP_LOAD_VARIANT, true)
                            .append(HADOOP_LOAD_VARIANT_PENDING_FILES, filesToMerge);

                    AbstractHadoopVariantStoragePipeline localEtl = newStorageETL(doLoad, extraOptions);

                    int studyId = getOptions().getInt(Options.STUDY_ID.key());
                    localEtl.preLoad(inputFiles.get(i), outdirUri);
                    localEtl.merge(studyId, filesToMerge);
                    localEtl.postLoad(inputFiles.get(i), outdirUri);
                    filesToMerge.clear();
                }
            }

            annotateLoadedFiles(outdirUri, inputFiles, concurrResult, getOptions());
            calculateStatsForLoadedFiles(outdirUri, inputFiles, concurrResult, getOptions());

        }
    } catch (InterruptedException e) {
        Thread.interrupted();
        throw new StoragePipelineException("Interrupted!", e, concurrResult);
    } catch (ExecutionException e) {
        throw new StoragePipelineException("Execution exception!", e, concurrResult);
    } catch (TimeoutException e) {
        throw new StoragePipelineException("Timeout Exception", e, concurrResult);
    } finally {
        if (!executorService.isShutdown()) {
            try {
                executorService.shutdownNow();
            } catch (Exception e) {
                logger.error("Problems shutting executor service down", e);
            }
        }
    }
    return concurrResult;
}
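
The finally block in the method above uses isShutdown() as a safety net: the pool is shut down right after all files have been submitted, and shutdownNow() is only meant to fire if execution leaves the method before that point. A minimal hedged skeleton of that arrangement, with the submissions moved inside the try so the guard can actually trigger (task bodies, pool size and timeouts are placeholders):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class PipelineShutdownSketch {
    public static void main(String[] args) {
        ExecutorService executor = Executors.newFixedThreadPool(2, r -> {
            Thread t = new Thread(r);
            t.setDaemon(true); // daemon workers, as in the engine above, so they never block JVM exit
            return t;
        });
        List<Future<String>> futures = new ArrayList<>();
        try {
            futures.add(executor.submit(() -> "result-1")); // stand-ins for the per-file pipelines
            futures.add(executor.submit(() -> "result-2"));
            executor.shutdown(); // stop accepting work; already-submitted tasks keep running
            for (Future<String> future : futures) {
                System.out.println(future.get(1, TimeUnit.MINUTES)); // collect each result with a bound
            }
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            if (!executor.isShutdown()) {
                executor.shutdownNow(); // only reached if an error occurred before shutdown() was called
            }
        }
    }
}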