Example usage for java.util.concurrent ExecutorService awaitTermination

Introduction

On this page you can find example usage for java.util.concurrent ExecutorService awaitTermination.

Prototype

boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException;

Document

Blocks until all tasks have completed execution after a shutdown request, or the timeout occurs, or the current thread is interrupted, whichever happens first.
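
As the examples below show, awaitTermination is almost always paired with shutdown(): request an orderly shutdown, wait a bounded amount of time for in-flight tasks to drain, and fall back to shutdownNow() if the timeout elapses. The following minimal sketch illustrates that pattern; the pool size and timeout are arbitrary and not taken from any of the projects below.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class AwaitTerminationSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        pool.execute(() -> System.out.println("task on " + Thread.currentThread().getName()));

        pool.shutdown();                                   // stop accepting new tasks; queued tasks still run
        if (!pool.awaitTermination(30, TimeUnit.SECONDS)) {
            pool.shutdownNow();                            // timeout elapsed; interrupt remaining tasks
        }
    }
}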

Usage

From source file:org.apache.activemq.store.jdbc.JDBCCleanupLimitedPoolTest.java

@Test
public void testNoDeadlockOnXaPoolExhaustion() throws Exception {
    final CountDownLatch done = new CountDownLatch(1);
    final CountDownLatch doneCommit = new CountDownLatch(1000);

    final ActiveMQXAConnectionFactory factory = new ActiveMQXAConnectionFactory(
            broker.getTransportConnectorByScheme("tcp").getPublishableConnectString());

    ExecutorService executorService = Executors.newCachedThreadPool();
    // some contention over pool of 2
    for (int i = 0; i < 3; i++) {
        executorService.execute(new Runnable() {
            @Override
            public void run() {
                try {
                    ActiveMQXAConnection conn = (ActiveMQXAConnection) factory.createXAConnection();
                    conn.start();
                    XASession sess = conn.createXASession();
                    while (done.getCount() > 0 && doneCommit.getCount() > 0) {
                        Xid xid = createXid();
                        sess.getXAResource().start(xid, XAResource.TMNOFLAGS);
                        MessageProducer producer = sess.createProducer(sess.createQueue("test"));
                        producer.send(sess.createTextMessage("test"));
                        sess.getXAResource().end(xid, XAResource.TMSUCCESS);
                        sess.getXAResource().prepare(xid);
                        sess.getXAResource().commit(xid, false);
                        doneCommit.countDown();
                    }

                    conn.close();

                } catch (Exception ignored) {
                    ignored.printStackTrace();
                }
            }
        });
    }

    executorService.execute(new Runnable() {
        @Override
        public void run() {
            try {
                while (!done.await(10, TimeUnit.MILLISECONDS) && doneCommit.getCount() > 0) {
                    jdbcPersistenceAdapter.cleanup();
                }
            } catch (Exception ignored) {
            }

        }
    });

    executorService.shutdown();
    boolean allComplete = executorService.awaitTermination(40, TimeUnit.SECONDS);
    done.countDown();
    assertTrue("all complete", allComplete);
    executorService.shutdownNow();

    assertTrue("xa tx done", doneCommit.await(10, TimeUnit.SECONDS));
}

From source file:edu.berkeley.sparrow.examples.BackendBenchmarkProfiler.java

/**
 * Run an experiment which launches tasks at {@code arrivalRate} for {@code durationMs}
 * milliseconds and waits for all tasks to finish. Statistics about the distribution of task
 * run times and wait times are accumulated into the supplied {@code runTimes} and
 * {@code waitTimes} {@link DescriptiveStatistics} objects. Tasks
 * are executed in a thread pool which contains at least {@code corePoolSize} threads
 * and grows up to {@code maxPoolSize} threads (growing whenever a new task arrives
 * and all existing threads are used).
 * 
 * Setting {@code maxPoolSize} to a very large number enacts time sharing, while
 * setting it equal to {@code corePoolSize} creates a fixed size task pool.
 * 
 * The derivative of task finishes is tracked by bucketing tasks at the granularity
 * {@code bucketSize}. If it is detected that task finishes are increasing in an 
 * unbounded fashion (i.e. infinite queuing is occurring), a {@link RuntimeException}
 * is thrown.
 */
public static void runExperiment(double arrivalRate, int corePoolSize, int maxPoolSize, long bucketSize,
        long durationMs, DescriptiveStatistics runTimes, DescriptiveStatistics waitTimes) {
    long startTime = System.currentTimeMillis();
    long keepAliveTime = 10;
    Random r = new Random();
    BlockingQueue<Runnable> runQueue = new LinkedBlockingQueue<Runnable>();
    ExecutorService threadPool = new ThreadPoolExecutor(corePoolSize, maxPoolSize, keepAliveTime,
            TimeUnit.MILLISECONDS, runQueue);
    if (maxPoolSize == Integer.MAX_VALUE) {
        threadPool = Executors.newCachedThreadPool();
    }

    // run times indexed by bucketing interval
    HashMap<Long, List<Long>> bucketedRunTimes = new HashMap<Long, List<Long>>();
    // wait times indexed by bucketing interval
    HashMap<Long, List<Long>> bucketedWaitTimes = new HashMap<Long, List<Long>>();

    /*
     * This is a little tricky. 
     * 
     * We want to generate inter-arrival delays according to the arrival rate specified.
     * The simplest option would be to generate an arrival delay and then sleep() for it
     * before launching each task. This has an issue, however: sleep() might wait 
     * several ms longer than we ask it to. When task arrival rates get really fast, 
     * i.e. one task every 10 ms, sleeping an additional few ms will mean we launch 
     * tasks at a much lower rate than requested.
     * 
     * Instead, we keep track of task launches in a way that does not depend on how long
     * sleep() actually takes. We still might have tasks launch slightly after their
     * scheduled launch time, but we will not systematically "fall behind" due to
     * compounding time lost during sleep() calls.
     */
    long currTime = startTime;
    while (true) {
        long delay = (long) (generateInterarrivalDelay(r, arrivalRate) * 1000);

        // When should the next task launch, based on when the last task was scheduled
        // to launch.
        long nextTime = currTime + delay;

        // Diff gives how long we should wait for the next scheduled task. The difference 
        // may be negative if our last sleep() lasted too long relative to the inter-arrival
        // delay based on the last scheduled launch, so we round up to 0 in that case. 
        long diff = Math.max(0, nextTime - System.currentTimeMillis());
        currTime = nextTime;
        if (diff > 0) {
            try {
                Thread.sleep(diff);
            } catch (InterruptedException e) {
                System.err.println("Unexpected interruption!");
                System.exit(1);
            }
        }
        threadPool.submit((new BenchmarkRunnable(bucketedRunTimes, bucketedWaitTimes, bucketSize)));
        if (System.currentTimeMillis() > startTime + durationMs) {
            break;
        }
    }
    threadPool.shutdown();
    try {
        threadPool.awaitTermination(Long.MAX_VALUE, TimeUnit.MILLISECONDS);
    } catch (InterruptedException e1) {
        System.err.println("Unexpected interruption!");
        System.exit(1);
    }
    List<Long> times = new ArrayList<Long>(bucketedRunTimes.keySet());
    Collections.sort(times);
    HashMap<Long, DescriptiveStatistics> bucketStats = new HashMap<Long, DescriptiveStatistics>();

    // Remove the first and last buckets since they will not be completely full due to
    // discretization.
    times.remove(0);
    times.remove(times.size() - 1);

    for (Long time : times) {
        DescriptiveStatistics stats = new DescriptiveStatistics();
        List<Long> list = bucketedRunTimes.get(time);
        for (Long l : list) {
            stats.addValue(l);
            runTimes.addValue(l);
        }
        bucketStats.put(time, stats);

        List<Long> waitList = bucketedWaitTimes.get(time);
        for (Long l : waitList) {
            waitTimes.addValue(l);
        }
    }
    int size = bucketStats.size();
    if (size >= 2) {
        DescriptiveStatistics first = bucketStats.get(times.get(0));
        DescriptiveStatistics last = bucketStats.get(times.get(times.size() - 1));
        double increase = last.getPercentile(50) / first.getPercentile(50);
        // A simple heuristic: if the median runtime went up by a factor of five from the
        // first to the last complete bucket, we assume we are seeing unbounded growth.
        if (increase > 5.0) {
            throw new RuntimeException(
                    "Queue not in steady state: " + last.getMean() + " vs " + first.getMean());
        }
    }
}

From source file:com.threadswarm.imagefeedarchiver.driver.CommandLineDriver.java

@Override
public void run() {
    //setup filters
    List<RssItemFilter> filterList = new LinkedList<RssItemFilter>();
    filterList.add(new PreviouslyDownloadedItemFilter(processedRssItemDAO));
    RssItemFilter chainedItemFilter = new ChainedRssItemFilter(filterList);

    RssChannel rssChannel = null;
    try {
        rssChannel = fetchRssChannel(rssFeedUri);
    } catch (IOException | FeedParserException e) {
        LOGGER.error(
                "An Exception was thrown while attempting to download and parse the target RSS feed.. exiting",
                e);
        System.exit(1);
    }

    List<RssItem> filteredItemList = new LinkedList<RssItem>();
    if (rssChannel != null && rssChannel.getItems() != null) {
        for (RssItem rssItem : rssChannel.getItems()) {
            rssItem = chainedItemFilter.filter(rssItem);
            if (rssItem != null)
                filteredItemList.add(rssItem);
        }
    }

    if (!filteredItemList.isEmpty()) {
        //create list of headers to be used when downloading images
        List<Header> headerList = new ArrayList<Header>(2);
        if (doNotTrackRequested) {
            LOGGER.debug("Adding 'DNT' header to worker requests");
            headerList.add(DNT_HEADER);
        }
        headerList.add(new BasicHeader(HttpHeaders.REFERER, rssFeedUri.toString()));
        headerList = Collections.unmodifiableList(headerList);

        ExecutorService executorService = null;
        try {
            executorService = Executors.newFixedThreadPool(threadCount);
            CompletionService<ProcessedRssItem> completionService = new ExecutorCompletionService<ProcessedRssItem>(
                    executorService);
            Set<URI> processedURISet = new ConcurrentSkipListSet<URI>();
            int itemCount = 0;
            for (RssItem rssItem : filteredItemList) {
                completionService.submit(new RssItemProcessor(httpClient, rssItem, processedRssItemDAO,
                        outputDirectory, headerList, processedURISet, downloadDelay, forceHttps));
                itemCount++;
            }

            LOGGER.info("{} jobs submitted for execution", itemCount);

            for (int x = 0; x < itemCount; x++) {
                ProcessedRssItem processedItem = completionService.take().get();
                LOGGER.info("Item status: {} --> [{}]", processedItem.getRssItem().getTitle(),
                        processedItem.getDownloadStatus());
            }
        } catch (InterruptedException e) {
            LOGGER.warn("Thread interrupted while blocking", e);
            Thread.currentThread().interrupt(); // restore interrupt
        } catch (ExecutionException e) {
            LOGGER.error("An Exception was thrown during worker execution and subsequently propagated", e);
            e.printStackTrace();
        } finally {
            executorService.shutdown();
            try {
                executorService.awaitTermination(10, TimeUnit.SECONDS);
            } catch (InterruptedException e) {
                LOGGER.warn("Thread interrupted while blocking", e);
                Thread.currentThread().interrupt(); // restore interrupt
            }
            httpClient.getConnectionManager().shutdown();
        }
    }
}

From source file:edu.cmu.tetrad.data.DataUtils.java

public static ICovarianceMatrix covarianceNonparanormalDrton(DataSet dataSet) {
    final CovarianceMatrix covMatrix = new CovarianceMatrix(dataSet);
    final TetradMatrix data = dataSet.getDoubleData();
    final int NTHREDS = Runtime.getRuntime().availableProcessors() * 10;
    final int EPOCH_COUNT = 100000;

    ExecutorService executor = Executors.newFixedThreadPool(NTHREDS);
    int runnableCount = 0;

    for (int _i = 0; _i < dataSet.getNumColumns(); _i++) {
        for (int _j = _i; _j < dataSet.getNumColumns(); _j++) {
            final int i = _i;
            final int j = _j;

            //                double tau = StatUtils.rankCorrelation(data.viewColumn(i).toArray(), data.viewColumn(j).toArray());
            Runnable worker = new Runnable() {
                @Override
                public void run() {
                    double tau = StatUtils.kendallsTau(data.getColumn(i).toArray(),
                            data.getColumn(j).toArray());
                    covMatrix.setValue(i, j, tau);
                    covMatrix.setValue(j, i, tau);
                }
            };

            executor.execute(worker);

            if (runnableCount < EPOCH_COUNT) {
                runnableCount++;
                //                    System.out.println(runnableCount);
            } else {
                executor.shutdown();
                try {
                    // Wait until all threads are finished
                    executor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
                    System.out.println("Finished all threads");
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }

                executor = Executors.newFixedThreadPool(NTHREDS);
                runnableCount = 0;
            }
        }
    }

    executor.shutdown();

    try {
        // Wait until all threads are finished
        executor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
        System.out.println("Finished all threads");
    } catch (InterruptedException e) {
        e.printStackTrace();
    }

    return covMatrix;
}

From source file:com.google.cloud.hadoop.gcsio.GoogleCloudStorageIntegrationTest.java

/**
 * Creates objects with the given names in the given bucket.
 */
private void createObjects(final String bucketName, String[] objectNames) throws IOException {

    final ExecutorService threadPool = Executors.newCachedThreadPool();
    final CountDownLatch counter = new CountDownLatch(objectNames.length);
    List<Future<?>> futures = new ArrayList<>();
    // Do each creation asynchronously.
    for (final String objectName : objectNames) {
        Future<?> future = threadPool.submit(new Runnable() {
            @Override
            public void run() {
                try {
                    if (objectName.endsWith(GoogleCloudStorage.PATH_DELIMITER)) {
                        mkdir(bucketName, objectName);
                    } else {
                        // Just use objectName as file contents.
                        writeTextFile(bucketName, objectName, objectName);
                    }
                } catch (Throwable ioe) {
                    throw new RuntimeException(
                            String.format("Exception creating %s/%s", bucketName, objectName), ioe);
                } finally {
                    counter.countDown();
                }
            }
        });
        futures.add(future);
    }

    try {
        counter.await();
    } catch (InterruptedException ie) {
        throw new IOException("Interrupted while awaiting object creation!", ie);
    } finally {
        threadPool.shutdown();
        try {
            if (!threadPool.awaitTermination(10L, TimeUnit.SECONDS)) {
                System.err.println("Failed to awaitTermination! Forcing executor shutdown.");
                threadPool.shutdownNow();
            }
        } catch (InterruptedException ie) {
            throw new IOException("Interrupted while shutting down threadpool!", ie);
        }
    }

    for (int i = 0; i < futures.size(); i++) {
        try {
            // We should already be done.
            futures.get(i).get(10, TimeUnit.MILLISECONDS);
        } catch (Exception e) {
            throw new IOException(String.format("Creation of file %s failed with exception", objectNames[i]), e);
        }
    }
}

From source file:io.snappydata.hydra.cluster.SnappyTest.java

/**
 * Executes snappy Streaming Jobs in Task.
 */
public static void HydraTask_executeSnappyStreamingJobWithFileStream() {
    Runnable fileStreaming = new Runnable() {
        public void run() {
            snappyTest.executeSnappyStreamingJob(SnappyPrms.getSnappyStreamingJobClassNames(),
                    "snappyStreamingJobResult_" + System.currentTimeMillis() + ".log");
        }
    };

    Runnable simulateFileStream = new Runnable() {
        public void run() {
            snappyTest.simulateStream();
        }
    };

    ExecutorService es = Executors.newFixedThreadPool(2);
    es.submit(fileStreaming);
    es.submit(simulateFileStream);
    try {
        Log.getLogWriter().info("Sleeping for " + waitTimeBeforeStreamingJobStatus
                + " millis before executor service shut down");
        Thread.sleep(waitTimeBeforeStreamingJobStatus);
        es.shutdown();
        es.awaitTermination(60, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        throw new TestException(
                "Exception occurred while waiting for the snappy streaming job process execution."
                        + "\nError Message:" + e.getMessage());
    }
}

From source file:org.apache.hadoop.hbase.util.RegionMover.java

@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "DLS_DEAD_LOCAL_STORE", justification = "FB is wrong; its size is read")
private void unloadRegions(Admin admin, String server, ArrayList<String> regionServers, boolean ack,
        List<HRegionInfo> movedRegions) throws Exception {
    List<HRegionInfo> regionsToMove = new ArrayList<HRegionInfo>();// FindBugs: DLS_DEAD_LOCAL_STORE
    regionsToMove = getRegions(this.conf, server);
    if (regionsToMove.size() == 0) {
        LOG.info("No Regions to move....Quitting now");
        return;
    } else if (regionServers.size() == 0) {
        LOG.warn("No Regions were moved - no servers available");
        throw new Exception("No online region servers");
    }
    while (true) {
        regionsToMove = getRegions(this.conf, server);
        regionsToMove.removeAll(movedRegions);
        if (regionsToMove.size() == 0) {
            break;
        }
        int counter = 0;
        LOG.info("Moving " + regionsToMove.size() + " regions from " + this.hostname + " to "
                + regionServers.size() + " servers using " + this.maxthreads + " threads .Ack Mode:" + ack);
        ExecutorService moveRegionsPool = Executors.newFixedThreadPool(this.maxthreads);
        List<Future<Boolean>> taskList = new ArrayList<Future<Boolean>>();
        int serverIndex = 0;
        while (counter < regionsToMove.size()) {
            if (ack) {
                Future<Boolean> task = moveRegionsPool.submit(new MoveWithAck(admin, regionsToMove.get(counter),
                        server, regionServers.get(serverIndex), movedRegions));
                taskList.add(task);
            } else {
                Future<Boolean> task = moveRegionsPool.submit(new MoveWithoutAck(admin,
                        regionsToMove.get(counter), server, regionServers.get(serverIndex), movedRegions));
                taskList.add(task);
            }
            counter++;
            serverIndex = (serverIndex + 1) % regionServers.size();
        }
        moveRegionsPool.shutdown();
        long timeoutInSeconds = regionsToMove.size()
                * admin.getConfiguration().getInt(MOVE_WAIT_MAX_KEY, DEFAULT_MOVE_WAIT_MAX);
        try {
            if (!moveRegionsPool.awaitTermination(timeoutInSeconds, TimeUnit.SECONDS)) {
                moveRegionsPool.shutdownNow();
            }
        } catch (InterruptedException e) {
            moveRegionsPool.shutdownNow();
            Thread.currentThread().interrupt();
        }
        for (Future<Boolean> future : taskList) {
            try {
                // if even after shutdownNow threads are stuck we wait for 5 secs max
                if (!future.get(5, TimeUnit.SECONDS)) {
                    LOG.error("Was Not able to move region....Exiting Now");
                    throw new Exception("Could not move region Exception");
                }
            } catch (InterruptedException e) {
                LOG.error("Interrupted while waiting for Thread to Complete " + e.getMessage(), e);
                throw e;
            } catch (ExecutionException e) {
                LOG.error("Got Exception From Thread While moving region " + e.getMessage(), e);
                throw e;
            } catch (CancellationException e) {
                LOG.error("Thread for moving region cancelled. Timeout for cancellation:" + timeoutInSeconds
                        + "secs", e);
                throw e;
            }
        }
    }
}

From source file:io.druid.data.input.impl.prefetch.PrefetchSqlFirehoseFactory.java

@Override
public Firehose connect(InputRowParser<Map<String, Object>> firehoseParser, @Nullable File temporaryDirectory)
        throws IOException {
    if (objects == null) {
        objects = ImmutableList.copyOf(Preconditions.checkNotNull(initObjects(), "objects"));
    }
    if (cacheManager.isEnabled() || prefetchConfig.getMaxFetchCapacityBytes() > 0) {
        Preconditions.checkNotNull(temporaryDirectory, "temporaryDirectory");
        Preconditions.checkArgument(temporaryDirectory.exists(), "temporaryDirectory[%s] does not exist",
                temporaryDirectory);
        Preconditions.checkArgument(temporaryDirectory.isDirectory(),
                "temporaryDirectory[%s] is not a directory", temporaryDirectory);
    }

    LOG.info("Create a new firehose for [%d] queries", objects.size());

    // fetchExecutor is responsible for background data fetching
    final ExecutorService fetchExecutor = Execs.singleThreaded("firehose_fetch_%d");
    final Fetcher<T> fetcher = new SqlFetcher<>(cacheManager, objects, fetchExecutor, temporaryDirectory,
            prefetchConfig, new ObjectOpenFunction<T>() {
                @Override
                public InputStream open(T object, File outFile) throws IOException {
                    return openObjectStream(object, outFile);
                }

                @Override
                public InputStream open(T object) throws IOException {
                    final File outFile = File.createTempFile("sqlresults_", null, temporaryDirectory);
                    return openObjectStream(object, outFile);
                }
            });

    return new SqlFirehose(new Iterator<JsonIterator<Map<String, Object>>>() {
        @Override
        public boolean hasNext() {
            return fetcher.hasNext();
        }

        @Override
        public JsonIterator<Map<String, Object>> next() {
            if (!hasNext()) {
                throw new NoSuchElementException();
            }
            try {
                TypeReference<Map<String, Object>> type = new TypeReference<Map<String, Object>>() {
                };
                final OpenedObject<T> openedObject = fetcher.next();
                final InputStream stream = openedObject.getObjectStream();
                return new JsonIterator<>(type, stream, openedObject.getResourceCloser(), objectMapper);
            } catch (Exception ioe) {
                throw new RuntimeException(ioe);
            }
        }
    }, firehoseParser, () -> {
        fetchExecutor.shutdownNow();
        try {
            Preconditions.checkState(
                    fetchExecutor.awaitTermination(prefetchConfig.getFetchTimeout(), TimeUnit.MILLISECONDS));
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new ISE("Failed to shutdown fetch executor during close");
        }
    });
}

From source file:org.elasticsearch.index.mapper.attachment.AttachmentMapper.java

private Map<String, Object> parseAndCalculateChecksumWithThreads(XContentParser parser, int indexedChars)
        throws SecurityException, IllegalAccessException, NoSuchFieldException, IOException,
        InterruptedException, ExecutionException, TimeoutException {

    Map<String, Object> resultMap = new HashMap<String, Object>();
    Metadata metadata = new Metadata();
    JsonParser jsonParser = getInternalJsonParser(parser);

    PipedInputStream pipedIs = new PipedInputStream();
    PipedOutputStream pipedOs = new PipedOutputStream(pipedIs);

    PipedInputStream pipedIs2 = new PipedInputStream();
    PipedOutputStream pipedOs2 = new PipedOutputStream(pipedIs2);

    ExecutorService pool = Executors.newFixedThreadPool(2);
    Future future = pool.submit(new ParsingThread(pipedIs, metadata, indexedChars));
    Future checksumFuture = null;
    if (calculateChecksum) {
        checksumFuture = pool.submit(new CalcualteChecksumThread(pipedIs2));
    }
    TeeOutputStream tos = new TeeOutputStream(pipedOs, pipedOs2);
    int readBinaryValue = jsonParser.readBinaryValue(tos);
    // tee stream perhaps
    IOUtils.closeQuietly(tos);
    IOUtils.closeQuietly(pipedOs);
    IOUtils.closeQuietly(pipedOs2);

    System.out.println("main thread finish read" + readBinaryValue);
    ParseResult parseResult = (ParseResult) future.get(10 * 100, TimeUnit.SECONDS);
    CalcualteChecksumResult checksumResult = null;
    if (calculateChecksum && checksumFuture != null) {
        checksumResult = (CalcualteChecksumResult) checksumFuture.get(10 * 100, TimeUnit.SECONDS);
        System.out.println(checksumResult.checksum);
    }
    System.out.println("parseResult");
    metadata = parseResult.metadata;
    // although metadata is a reference, better to return and use it for easier
    // refactoring later
    System.out.println(metadata);
    System.out.println("Thread join");
    pool.shutdown();
    pool.awaitTermination(10 * 100, TimeUnit.SECONDS);
    //TODO align static class and map
    resultMap.put("parseResult", parseResult);
    resultMap.put("checksumResult", checksumResult);
    return resultMap;
}

From source file:org.jbpm.xes.EvaluationExport.java

@Test
@Ignore
public void exportProcess() throws Exception {
    //users
    final String administrator = "Administrator";

    // create runtime manager with single process - hello.bpmn
    createRuntimeManager("evaluation.bpmn");

    // take RuntimeManager to work with process engine
    RuntimeEngine runtimeEngine = getRuntimeEngine();

    // get access to KieSession instance
    KieSession ksession = runtimeEngine.getKieSession();

    List<Long> pIds = new ArrayList<>();

    int instances = 100;

    IntStream.range(0, instances).forEach(i -> {
        Map<String, Object> vars = new HashMap<>();
        vars.put("employee", administrator);
        vars.put("reason", "test instance " + i);
        vars.put("performance", RandomUtils.nextInt(0, 11));

        // start process
        ProcessInstance processInstance = ksession.startProcess("evaluation", vars);

        // check whether the process instance is still active
        assertProcessInstanceActive(processInstance.getId(), ksession);

        pIds.add(processInstance.getId());
    });

    final TaskService taskService = getRuntimeEngine().getTaskService();
    final List<TaskSummary> tasks = taskService.getTasksAssignedAsBusinessAdministrator(administrator, null);
    final ExecutorService executorService = Executors.newFixedThreadPool(5);
    CountDownLatch count = new CountDownLatch(instances * 3);
    tasks.forEach(t -> {
        executorService.submit(() -> {
            taskService.start(t.getId(), administrator);
            try {
                Thread.sleep(2 * 1000);
            } catch (Exception ex) {
            }
            taskService.complete(t.getId(), administrator, null);
            count.countDown();
            taskService.getTasksByProcessInstanceId(t.getProcessInstanceId()).stream()
                    .filter(newTaskId -> newTaskId.equals(t.getId()) == false).forEach(taskId -> {
                        executorService.submit(() -> {
                            final Task task = taskService.getTaskById(taskId);
                            final String userId = "HR Evaluation".equals(task.getName()) ? "mary" : "john";
                            taskService.claim(taskId, userId);
                            taskService.start(taskId, userId);
                            if ("HR Evaluation".equals(task.getName())) {
                                try {
                                    Thread.sleep(4 * 1000);
                                } catch (Exception ex) {
                                }
                            } else {
                                try {
                                    Thread.sleep(2 * 1000);
                                } catch (Exception ex) {
                                }
                            }
                            taskService.complete(taskId, userId, null);

                            count.countDown();
                        });
                    });
        });
    });

    count.await();
    executorService.shutdown();
    executorService.awaitTermination(1, TimeUnit.MINUTES);

    pIds.forEach(id -> assertProcessInstanceCompleted(id));

    DataSetService dataSetService = new DataSetServiceImpl(() -> xesDataSource);
    XESExportServiceImpl service = new XESExportServiceImpl();
    service.setDataSetService(dataSetService);
    final String xml = service.export(XESProcessFilter.builder().withProcessId("evaluation").build());

    FileUtils.write(new File("evaluation.xes"), xml);
}