Example usage for java.util.concurrent ExecutorService submit

List of usage examples for java.util.concurrent ExecutorService submit

Introduction

On this page you can find example usages of java.util.concurrent ExecutorService submit.

Prototype

Future<?> submit(Runnable task);

Document

Submits a Runnable task for execution and returns a Future representing that task.
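
Before the real-world usages below, here is a minimal, self-contained sketch of the call itself (the pool size and task body are arbitrary illustrations): submit the Runnable, then block on the returned Future. Because a Runnable produces no value, get() returns null on success and rethrows any task failure wrapped in an ExecutionException.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class SubmitRunnableSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        // submit(Runnable) returns a Future<?> used to track or cancel the task
        Future<?> future = pool.submit(new Runnable() {
            @Override
            public void run() {
                System.out.println("task ran on " + Thread.currentThread().getName());
            }
        });
        future.get(); // blocks until the task completes; returns null for a Runnable
        pool.shutdown();
    }
}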

Usage

From source file:kafka.deploy.utils.command.CommandRemoteOperation.java

/**
 * Executes the given commands in parallel on the remote hosts and
 * aggregates the results for the caller.
 *
 * @param hostNameCommandLineMap Map with a key is the external host name
 *        and the value is the command line to execute remotely on that host
 * 
 * @return List of result types as dictated by the subclass
 * 
 * @throws RemoteOperationException Thrown on error invoking the command on
 *         one or more clients.
 */
protected void execute(Map<String, String> hostNameCommandLineMap) throws RemoteOperationException {

    ExecutorService threadPool = Executors.newFixedThreadPool(hostNameCommandLineMap.size());
    List<Future<?>> futures = new ArrayList<Future<?>>();

    for (Map.Entry<String, String> entry : hostNameCommandLineMap.entrySet()) {
        String hostName = entry.getKey();
        String commandLine = entry.getValue();

        if (logger.isDebugEnabled())
            logger.debug("Command to execute: " + commandLine);

        List<String> commandArgs = parse(commandLine);
        UnixCommand command = new UnixCommand(hostName, commandArgs);
        Callable<?> callable = getCallable(command);
        Future<?> future = threadPool.submit(callable);
        futures.add(future);
    }

    // Build up a list of all the results and/or capture the errors as they
    // occur.
    try {
        StringBuilder errors = new StringBuilder();

        for (Future<?> future : futures) {
            Throwable t = null;

            try {
                future.get();
            } catch (ExecutionException ex) {
                t = ex.getCause();
            } catch (Exception e) {
                t = e;
            }

            if (t != null) {
                if (logger.isWarnEnabled())
                    logger.warn(t, t);

                if (errors.length() > 0)
                    errors.append("; ");

                errors.append(t.getMessage());
            }
        }

        if (errors.length() > 0)
            throw new RemoteOperationException(errors.toString());
    } finally {
        threadPool.shutdown();

        try {
            threadPool.awaitTermination(60, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            if (logger.isWarnEnabled())
                logger.warn(e, e);
        }
    }
}
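
Although this page documents submit(Runnable), note that the example above actually hands the pool a Callable, so the compiler selects a different overload. For reference, ExecutorService declares three submit variants:

<T> Future<T> submit(Callable<T> task);
<T> Future<T> submit(Runnable task, T result);
Future<?> submit(Runnable task);

With a Callable, Future.get() returns the task's computed value; with a Runnable, it returns null (or the supplied result for the two-argument form).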

From source file:com.espertech.esper.example.rfidassetzone.LRMovingSimMain.java

private void tryPerf(int numSeconds, int numAssetGroups, int numThreads, int ratioZoneMove,
        int ratioZoneSplit) {
    // Create Asset Ids and assign to a zone
    log.info(".tryPerf Creating asset ids");
    String[][] assetIds = new String[numAssetGroups][3];
    int[][] zoneIds = new int[numAssetGroups][3];
    for (int i = 0; i < numAssetGroups; i++) {
        // Generate unique asset id over all groups
        String assetPrefix = String.format("%010d", i); // 10-digit zero-padded group id; suffix 0-2 appended below
        assetIds[i][0] = assetPrefix + "0";
        assetIds[i][1] = assetPrefix + "1";
        assetIds[i][2] = assetPrefix + "2";

        int currentZone = random.nextInt(AssetEventGenCallable.NUM_ZONES); // uniform in [0, NUM_ZONES)
        zoneIds[i][0] = currentZone;
        zoneIds[i][1] = currentZone;
        zoneIds[i][2] = currentZone;
    }

    // Create statements
    log.info(".tryPerf Creating " + numAssetGroups * 2 + " statements for " + numAssetGroups + " asset groups");
    AssetZoneSplitListener listeners[] = new AssetZoneSplitListener[numAssetGroups];
    for (int i = 0; i < numAssetGroups; i++) {
        String streamName = "CountZone_" + i;
        String assetIdList = "'" + assetIds[i][0] + "','" + assetIds[i][1] + "','" + assetIds[i][2] + "'";

        String textOne = "insert into " + streamName + " select " + i + " as groupId, zone, count(*) as cnt "
                + "from LocationReport(assetId in (" + assetIdList + ")).std:unique(assetId) "
                + "group by zone";
        EPStatement stmtOne = epService.getEPAdministrator().createEPL(textOne);
        if (log.isDebugEnabled())
            stmtOne.addListener(new AssetGroupCountListener());//for debugging

        String textTwo = "select * from pattern [" + "  every a=" + streamName + "(cnt in [1:2]) ->"
                + "  (timer:interval(10 sec) and not " + streamName + "(cnt in (0, 3)))]";
        EPStatement stmtTwo = epService.getEPAdministrator().createEPL(textTwo);
        listeners[i] = new AssetZoneSplitListener();
        stmtTwo.addListener(listeners[i]);
    }

    // First, send an event for each asset with its current zone
    log.info(".tryPerf Sending one event for each asset");
    for (int i = 0; i < assetIds.length; i++) {
        for (int j = 0; j < assetIds[i].length; j++) {
            LocationReport report = new LocationReport(assetIds[i][j], zoneIds[i][j]);
            epService.getEPRuntime().sendEvent(report);
        }
    }

    // Reset listeners
    for (int i = 0; i < listeners.length; i++) {
        listeners[i].reset();
    }
    Integer[][] assetGroupsForThread = getGroupsPerThread(numAssetGroups, numThreads);

    // For continuous simulation (ends when interrupted),
    if (continuousSimulation) {
        while (true) {
            AssetEventGenCallable callable = new AssetEventGenCallable(epService, assetIds, zoneIds,
                    assetGroupsForThread[0], ratioZoneMove, ratioZoneSplit);
            try {
                callable.call();
            } catch (Exception ex) {
                log.warn("Exception simulating in continuous mode: " + ex.getMessage(), ex);
                break;
            }
        }
        return;
    }

    // Create threadpool
    log.info(".tryPerf Starting " + numThreads + " threads");
    ExecutorService threadPool = Executors.newFixedThreadPool(numThreads);
    Future future[] = new Future[numThreads];
    AssetEventGenCallable callables[] = new AssetEventGenCallable[numThreads];

    for (int i = 0; i < numThreads; i++) {
        callables[i] = new AssetEventGenCallable(epService, assetIds, zoneIds, assetGroupsForThread[i],
                ratioZoneMove, ratioZoneSplit);
        Future<Boolean> f = threadPool.submit(callables[i]);
        future[i] = f;
    }

    // Run the simulation for the requested duration, reporting statistics every second
    log.info(".tryPerf Running for " + numSeconds + " seconds");
    long startTime = System.currentTimeMillis();
    long currTime;
    double deltaSeconds;
    int lastTotalEvents = 0;
    do {
        // sleep
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            log.debug("Interrupted", e);
            break;
        }
        currTime = System.currentTimeMillis();
        deltaSeconds = (currTime - startTime) / 1000.0;

        // report statistics
        int totalEvents = 0;
        int totalZoneMoves = 0;
        int totalZoneSplits = 0;
        int totalZoneSame = 0;
        for (int i = 0; i < callables.length; i++) {
            totalEvents += callables[i].getNumEventsSend();
            totalZoneMoves += callables[i].getNumZoneMoves();
            totalZoneSplits += callables[i].getNumZoneSplits();
            totalZoneSame += callables[i].getNumSameZone();
        }
        double throughputOverall = totalEvents / deltaSeconds;
        double totalLastBatch = totalEvents - lastTotalEvents;
        log.info("totalEvents=" + totalEvents + " delta=" + deltaSeconds + " throughputOverall="
                + throughputOverall + " lastBatch=" + totalLastBatch + " zoneMoves=" + totalZoneMoves
                + " zoneSame=" + totalZoneSame + " zoneSplits=" + totalZoneSplits);
        lastTotalEvents = totalEvents;

        // If we are within 15 seconds of shutdown, stop generating zone splits
        if (((numSeconds - deltaSeconds) < 15) && (callables[0].isGenerateZoneSplit())) {
            log.info(".tryPerf Setting stop split flag on threads");
            for (int i = 0; i < callables.length; i++) {
                callables[i].setGenerateZoneSplit(false);
            }
        }
    } while (deltaSeconds < numSeconds);

    log.info(".tryPerf Shutting down threads");
    for (int i = 0; i < callables.length; i++) {
        callables[i].setShutdown(true);
    }
    threadPool.shutdown();
    try {
        threadPool.awaitTermination(10, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        log.debug("Interrupted", e);
    }

    if (!isAssert) {
        return;
    }

    for (int i = 0; i < numThreads; i++) {
        try {
            if (!(Boolean) future[i].get()) {
                throw new RuntimeException("Invalid result of callable");
            }
        } catch (Exception e) {
            log.error("Exception encountered sending events: " + e.getMessage(), e);
        }
    }

    // Get groups split
    Set<Integer> splitGroups = new HashSet<Integer>();
    for (int i = 0; i < callables.length; i++) {
        splitGroups.addAll(callables[i].getSplitZoneGroups());
    }
    log.info(".tryPerf Generated splits were " + splitGroups + " groups");

    // Compare to listeners
    for (Integer groupId : splitGroups) {
        if (listeners[groupId].getCallbacks().size() == 0) {
            throw new RuntimeException("Invalid result for listener, expected split group");
        }
    }
}

From source file:com.google.cloud.hadoop.gcsio.GoogleCloudStorageIntegrationHelper.java

/**
 * Creates objects with the given names in the given bucket.
 */
private void createObjects(final String bucketName, String[] objectNames) throws IOException {

    final ExecutorService threadPool = Executors.newCachedThreadPool();
    final CountDownLatch counter = new CountDownLatch(objectNames.length);
    List<Future<?>> futures = new ArrayList<>();
    // Do each creation asynchronously.
    for (final String objectName : objectNames) {
        Future<?> future = threadPool.submit(new Runnable() {
            @Override
            public void run() {
                try {
                    if (objectName.endsWith(GoogleCloudStorage.PATH_DELIMITER)) {
                        mkdir(bucketName, objectName);
                    } else {
                        // Just use objectName as file contents.
                        writeTextFile(bucketName, objectName, objectName);
                    }
                } catch (Throwable ioe) {
                    throw new RuntimeException(
                            String.format("Exception creating %s/%s", bucketName, objectName), ioe);
                } finally {
                    counter.countDown();
                }
            }
        });
        futures.add(future);
    }

    try {
        counter.await();
    } catch (InterruptedException ie) {
        throw new IOException("Interrupted while awaiting object creation!", ie);
    } finally {
        threadPool.shutdown();
        try {
            if (!threadPool.awaitTermination(10L, TimeUnit.SECONDS)) {
                System.err.println("Failed to awaitTermination! Forcing executor shutdown.");
                threadPool.shutdownNow();
            }
        } catch (InterruptedException ie) {
            throw new IOException("Interrupted while shutting down threadpool!", ie);
        }
    }

    for (Future<?> future : futures) {
        try {
            // We should already be done.
            future.get(10, TimeUnit.MILLISECONDS);
        } catch (Exception e) {
            throw new IOException("Creation of file failed with exception", e);
        }
    }
}
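
When the only goal is "run everything, then continue", invokeAll can replace the CountDownLatch-plus-futures bookkeeping above: it blocks until every task has completed and returns the already-finished futures. A minimal sketch reusing the names from this example (the task body is elided):

List<Callable<Void>> tasks = new ArrayList<>();
for (final String objectName : objectNames) {
    tasks.add(new Callable<Void>() {
        @Override
        public Void call() throws Exception {
            // ... create the object exactly as in run() above ...
            return null;
        }
    });
}
List<Future<Void>> futures = threadPool.invokeAll(tasks); // blocks until all tasks finish
for (Future<Void> future : futures) {
    future.get(); // already done; rethrows any task failure as ExecutionException
}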

From source file:com.shonshampain.streamrecorder.util.StreamProxy.java

private HttpResponse download(String url) {
    DefaultHttpClient seed = new DefaultHttpClient();
    SchemeRegistry registry = new SchemeRegistry();
    registry.register(new Scheme("http", PlainSocketFactory.getSocketFactory(), 80));
    SingleClientConnManager mgr = new MyClientConnManager(seed.getParams(), registry);
    final DefaultHttpClient http = new DefaultHttpClient(mgr, seed.getParams());
    final HttpGet method = new HttpGet(url);
    method.addHeader("Icy-MetaData", "1");
    HttpResponse response;
    Logger.d(DBG, TAG, "starting download");
    ExecutorService executor = Executors.newFixedThreadPool(1);
    Callable<HttpResponse> readTask = new Callable<HttpResponse>() {
        @Override
        public HttpResponse call() throws Exception {
            return http.execute(method);
        }
    };
    Future<HttpResponse> future = executor.submit(readTask);
    try {
        response = future.get(STREAM_STALLED_TIMEOUT, TimeUnit.MILLISECONDS);
    } catch (TimeoutException to) {
        return null;
    } catch (InterruptedException ie) {
        Logger.e(TAG, "The read operation was interrupted");
        return null;
    } catch (ExecutionException ee) {
        return null;
    }
    Logger.d(DBG, TAG, "downloaded");
    return response;
}
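
This example shows a handy idiom: bounding a blocking call by running it on a single-thread executor and waiting with Future.get(timeout). One caveat is that on timeout the code above returns null but never cancels the task, so the stalled request keeps occupying the pool's thread. A minimal sketch of the same idiom with cancellation (slowCall and the one-second timeout are invented for illustration):

import java.util.concurrent.*;

public class BoundedCallSketch {
    static String slowCall() throws InterruptedException {
        Thread.sleep(5000); // stand-in for a blocking network call
        return "done";
    }

    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        Future<String> future = executor.submit(new Callable<String>() {
            @Override
            public String call() throws Exception {
                return slowCall();
            }
        });
        try {
            System.out.println(future.get(1, TimeUnit.SECONDS));
        } catch (TimeoutException te) {
            future.cancel(true); // interrupt the stalled call instead of leaking the thread
            System.out.println("call timed out");
        } finally {
            executor.shutdown();
        }
    }
}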

From source file:info.pancancer.arch3.containerProvisioner.ContainerProvisionerThreads.java

public void startThreads() throws InterruptedException {
    ExecutorService pool = Executors.newFixedThreadPool(DEFAULT_THREADS);
    ProcessVMOrders processVMOrders = new ProcessVMOrders(this.configFile, this.options.has(this.endlessSpec));
    ProvisionVMs provisionVMs = new ProvisionVMs(this.configFile, this.options.has(this.endlessSpec),
            this.options.has(testSpec));
    CleanupVMs cleanupVMs = new CleanupVMs(this.configFile, this.options.has(this.endlessSpec));
    List<Future<?>> futures = new ArrayList<>();
    futures.add(pool.submit(processVMOrders));
    futures.add(pool.submit(provisionVMs));
    futures.add(pool.submit(cleanupVMs));
    try {
        for (Future<?> future : futures) {
            future.get();
        }
    } catch (InterruptedException | ExecutionException ex) {
        log.error(ex.toString());
        throw new RuntimeException(ex);
    } finally {
        pool.shutdown();
    }
}

From source file:com.btoddb.fastpersitentqueue.FpqIT.java

@Test
public void testThreading() throws Exception {
    final int numEntries = 1000;
    final int numPushers = 4;
    final int numPoppers = 4;
    final int entrySize = 1000;
    fpq1.setMaxTransactionSize(2000);
    final int popBatchSize = 100;
    fpq1.setMaxMemorySegmentSizeInBytes(10000000);
    fpq1.setMaxJournalFileSize(10000000);
    fpq1.setMaxJournalDurationInMs(30000);
    fpq1.setFlushPeriodInMs(1000);
    fpq1.setNumberOfFlushWorkers(4);

    final Random pushRand = new Random(1000L);
    final Random popRand = new Random(1000000L);
    final AtomicInteger pusherFinishCount = new AtomicInteger();
    final AtomicInteger numPops = new AtomicInteger();
    final AtomicLong counter = new AtomicLong();
    final AtomicLong pushSum = new AtomicLong();
    final AtomicLong popSum = new AtomicLong();

    fpq1.init();

    ExecutorService execSrvc = Executors.newFixedThreadPool(numPushers + numPoppers);

    Set<Future> futures = new HashSet<Future>();

    // start pushing
    for (int i = 0; i < numPushers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                for (int i = 0; i < numEntries; i++) {
                    try {
                        long x = counter.getAndIncrement();
                        pushSum.addAndGet(x);
                        ByteBuffer bb = ByteBuffer.wrap(new byte[entrySize]);
                        bb.putLong(x);

                        fpq1.beginTransaction();
                        fpq1.push(bb.array());
                        fpq1.commit();
                        if ((x + 1) % 500 == 0) {
                            System.out.println("pushed ID = " + x);
                        }
                        Thread.sleep(pushRand.nextInt(5));
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
                pusherFinishCount.incrementAndGet();
            }
        });
        futures.add(future);
    }

    // start popping
    for (int i = 0; i < numPoppers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                while (pusherFinishCount.get() < numPushers || !fpq1.isEmpty()) {
                    try {
                        fpq1.beginTransaction();
                        try {
                            Collection<FpqEntry> entries = fpq1.pop(popBatchSize);
                            if (null == entries) {
                                Thread.sleep(100);
                                continue;
                            }

                            for (FpqEntry entry : entries) {
                                ByteBuffer bb = ByteBuffer.wrap(entry.getData());
                                popSum.addAndGet(bb.getLong());
                                if (entry.getId() % 500 == 0) {
                                    System.out.println("popped ID = " + entry.getId());
                                }
                            }
                            numPops.addAndGet(entries.size());
                            fpq1.commit();
                            entries.clear();
                        } finally {
                            if (fpq1.isTransactionActive()) {
                                fpq1.rollback();
                            }
                        }
                        Thread.sleep(popRand.nextInt(10));
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
            }
        });
        futures.add(future);
    }

    boolean finished = false;
    while (!finished) {
        try {
            for (Future f : futures) {
                f.get();
            }
            finished = true;
        } catch (InterruptedException e) {
            // ignore
            Thread.interrupted();
        }
    }

    assertThat(numPops.get(), is(numEntries * numPushers));
    assertThat(fpq1.getNumberOfEntries(), is(0L));
    assertThat(pushSum.get(), is(popSum.get()));
    assertThat(fpq1.getMemoryMgr().getNumberOfActiveSegments(), is(1));
    assertThat(fpq1.getMemoryMgr().getSegments(), hasSize(1));
    assertThat(fpq1.getJournalMgr().getJournalFiles().entrySet(), hasSize(1));
    assertThat(FileUtils.listFiles(fpq1.getPagingDirectory(), TrueFileFilter.INSTANCE, TrueFileFilter.INSTANCE),
            is(empty()));
    assertThat(
            FileUtils.listFiles(fpq1.getJournalDirectory(), TrueFileFilter.INSTANCE, TrueFileFilter.INSTANCE),
            hasSize(1));
}

From source file:com.sm.connector.client.PartitionClient.java

public T execute(List<RemoteClientImpl> list, int batchSize) {
    logger.info("execute " + filename + " threads " + list.size() + " invoker " + invoker.toString());
    int noOfThread = list.size();
    //List<RemoteClientImpl> list = createClients( urls);
    //CountDownLatch countDownLatch = new CountDownLatch(noOfThread);
    ExecutorService executor = Executors.newFixedThreadPool(noOfThread, new ThreadPoolFactory("Partition"));
    Aggregate<T> aggregate = null;
    try {
        aggregate = (Aggregate<T>) QueryUtils.createInstance(tClass);
    } catch (Exception ex) {
        throw new RuntimeException(ex.getMessage(), ex);
    }
    List<Future<Aggregate<T>>> results = new ArrayList<Future<Aggregate<T>>>(noOfThread);
    for (int i = 0; i < noOfThread; i++) {
        try {
            Aggregate<T> ft = (Aggregate<T>) QueryUtils.createInstance(tClass);
            RunThread runThread = new RunThread(i, list.get(i), batchSize, noOfThread, ft);
            Future<Aggregate<T>> t = executor.submit(runThread);
            results.add(t);
        } catch (Exception ex) {
            logger.error(ex.getMessage(), ex);
        }
    }
    for (Future<Aggregate<T>> each : results) {
        try {
            aggregate.aggregate(each.get().get());
        } catch (Exception ex) {
            logger.error(ex.getMessage(), ex);
        }
    }
    executor.shutdown();
    return aggregate.get();
}
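
Here the results are drained in submission order, so the fold waits on a slow first future even if later ones finished long ago. If results can be folded as they complete, ExecutorCompletionService is the standard alternative; a minimal self-contained sketch (the squaring task is a stand-in for the real per-partition work):

import java.util.concurrent.*;

public class CompletionOrderSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newFixedThreadPool(4);
        CompletionService<Integer> completionService = new ExecutorCompletionService<>(executor);
        for (int i = 0; i < 4; i++) {
            final int n = i;
            completionService.submit(new Callable<Integer>() {
                @Override
                public Integer call() {
                    return n * n; // stand-in for real work
                }
            });
        }
        int sum = 0;
        for (int i = 0; i < 4; i++) {
            sum += completionService.take().get(); // futures arrive in completion order
        }
        System.out.println("sum = " + sum);
        executor.shutdown();
    }
}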

From source file:gdsc.core.clustering.optics.LoOP.java

/**
 * Run the Local Outlier Probability computation using the given number of neighbours.
 *
 * @param k
 *            the number of neighbours (excluding self)
 * @param lambda
 *            The number of standard deviations to consider for density computation.
 * @return the LoOP scores
 * @throws InterruptedException
 *             if the current thread was interrupted while waiting
 * @throws ExecutionException
 *             if the computation threw an exception
 */
public double[] run(int k, double lambda) throws InterruptedException, ExecutionException {
    final int size = size();

    // Bounds check k
    if (k < 1)
        k = 1;
    else if (k > size)
        k = size;

    // Multi-thread
    final int nThreads = getNumberOfThreads();
    final ExecutorService executor = Executors.newFixedThreadPool(nThreads);
    final TurboList<Future<?>> futures = new TurboList<Future<?>>(nThreads);
    final int nPerThread = (int) Math.ceil((double) size / nThreads);

    // Find neighbours for each point and 
    // compute probabilistic distances
    final int[][] neighbours = new int[size][k];
    final double[] pd = new double[size];

    for (int from = 0; from < size;) {
        int to = Math.min(from + nPerThread, size);
        futures.add(executor.submit(new KNNWorker(neighbours, k, pd, from, to)));
        from = to;
    }
    wait(futures);

    // Compute Probabilistic Local Outlier Factors (PLOF)
    final double[] plofs = new double[size];
    final TurboList<PLOFWorker> workers = new TurboList<PLOFWorker>(nThreads);
    for (int from = 0; from < size;) {
        int to = Math.min(from + nPerThread, size);
        PLOFWorker w = new PLOFWorker(neighbours, k, pd, plofs, from, to);
        workers.add(w);
        futures.add(executor.submit(w));
        from = to;
    }
    wait(futures);

    // Get the final normalisation factor
    double nplof = 0;
    for (PLOFWorker w : workers)
        nplof += w.nplof;
    nplof = lambda * Math.sqrt(nplof / size);
    if (nplof <= 0)
        nplof = 1;

    // Normalise
    final double norm = 1. / (nplof * Math.sqrt(2.));
    for (int from = 0; from < size;) {
        int to = Math.min(from + nPerThread, size);
        futures.add(executor.submit(new NormWorker(plofs, norm, from, to)));
        from = to;
    }
    wait(futures);

    return plofs;
}
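
The wait(futures) helper is not part of this excerpt. A plausible reconstruction (an assumption, not the project's actual code, and written against java.util.List, which TurboList presumably implements) simply blocks on each Future so worker exceptions propagate, then clears the list so it can be refilled for the next pass:

// Hypothetical sketch of the wait(...) helper used above
private static void wait(List<Future<?>> futures) throws InterruptedException, ExecutionException {
    for (Future<?> future : futures)
        future.get(); // rethrows a worker's exception as ExecutionException
    futures.clear(); // the same list is reused for the next batch of tasks
}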

From source file:com.mirth.connect.plugins.datapruner.test.DataPrunerTest.java

@Test
@Ignore
public final void testConcurrency() throws Exception {
    /*
     * To run this concurrency test, you must set up a "reader" channel through the
     * administrator that routes messages to other channels that will be pruned. Then specify
     * the ids of those channels below.
     */
    final String readerChannelId = "f7158274-8692-4e53-9d17-db732c3346b8";
    ExecutorService executor = Executors.newSingleThreadExecutor();
    TestUtils.startMirthServer(15000);

    DataPruner pruner = new DataPruner();
    pruner.setPrunerBlockSize(1);
    pruner.setRetryCount(0);

    TestUtils.deleteAllMessages(readerChannelId);
    TestUtils.deleteAllMessages("0831345e-bbe0-4d62-8f2d-c65280bd479b");
    TestUtils.deleteAllMessages("b2e28f1b-d867-435a-a5f6-3b33d5261e66");

    // send messages into the test channel on a separate thread
    Future<Void> future = executor.submit(new Callable<Void>() {
        @Override
        public Void call() throws Exception {
            EngineController engineController = ControllerFactory.getFactory().createEngineController();
            logger.info("Sending messages");

            for (int i = 0; i < 100000; i++) {
                logger.info("sending message #" + i);
                engineController.dispatchRawMessage(readerChannelId, new RawMessage(TestUtils.TEST_HL7_MESSAGE),
                        false, true);
            }

            logger.info("Finished sending messages");
            return null;
        }
    });

    logger.info("Executing pruner");

    // run the pruner while messages are processing
    while (!future.isDone()) {
        pruner.run();
        Thread.sleep(2000);
    }

    logger.info("Test completed");
}

From source file:info.magnolia.imaging.caching.CachingImageStreamerRepositoryTest.java

/**
 * This test is not executed by default - too long!
 * Used to reproduce the "session already closed issue", see MGNLIMG-59.
 * Set the "expiration" property of the jobs map in CachingImageStreamer to a longer value
 * to have more chances of reproducing the problem.
 */
@Ignore
@Test
public void testConcurrencyAndJCRSessions() throws Exception {
    final HierarchyManager srcHM = MgnlContext.getHierarchyManager("website");
    final String srcPath = "/foo/bar";
    ContentUtil.createPath(srcHM, srcPath);

    // ParameterProvider for tests - return a new instance of the same node every time;
    // if we returned the same src instance every time, the purpose of this test would be defeated
    final ParameterProviderFactory<Object, Content> ppf = new TestParameterProviderFactory(srcHM, srcPath);

    final OutputFormat png = new OutputFormat();
    png.setFormatName("png");

    final ImageOperationChain<ParameterProvider<Content>> generator = new ImageOperationChain<ParameterProvider<Content>>();
    final URLImageLoader<ParameterProvider<Content>> load = new URLImageLoader<ParameterProvider<Content>>();
    load.setUrl(getClass().getResource("/funnel.gif").toExternalForm());
    generator.addOperation(load);
    generator.setOutputFormat(png);
    generator.setName("foo blob bar");
    generator.setParameterProviderFactory(ppf);

    // yeah, we're using a "wrong" workspace for the image cache, to avoid having to setup a custom one in this test
    final HierarchyManager hm = MgnlContext.getHierarchyManager("config");

    final ImageStreamer streamer = new CachingImageStreamer(hm, ppf.getCachingStrategy(),
            new DefaultImageStreamer());

    // thread pool of 10, launching 8 requests, can we hit some concurrency please?
    final ExecutorService executor = Executors.newFixedThreadPool(10);
    final ByteArrayOutputStream[] outs = new ByteArrayOutputStream[8];
    final Future[] futures = new Future[8];
    for (int i = 0; i < outs.length; i++) {
        final int ii = i;
        outs[i] = new ByteArrayOutputStream();
        futures[i] = executor.submit(new Runnable() {
            @Override
            public void run() {
                final ParameterProvider p = generator.getParameterProviderFactory()
                        .newParameterProviderFor(null);
                try {
                    streamer.serveImage(generator, p, outs[ii]);
                } catch (Exception e) {
                    throw new RuntimeException(e); // TODO
                }
            }
        });
    }
    executor.shutdown();
    executor.awaitTermination(30, TimeUnit.SECONDS);

    for (Future<?> future : futures) {
        assertTrue(future.isDone());
        assertFalse(future.isCancelled());
        // ignore the results of TestJob - but if there was an exception thrown by TestJob.call(),
        // it is only thrown back at us when we call get() below. (so the test will fail badly if the job threw an exception)
        Object ignored = future.get();
    }

    shutdownRepository(true);

    // sleep for a while so that the jobs map's expiration thread can kick in!
    Thread.sleep(10000);
}