Example usage for java.util.concurrent ExecutorService submit

List of usage examples for java.util.concurrent ExecutorService submit

Introduction

This page presents example usage of java.util.concurrent ExecutorService submit.

Prototype

Future<?> submit(Runnable task);

Document

Submits a Runnable task for execution and returns a Future representing that task.
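
For orientation, here is a minimal, self-contained sketch of the call (the SubmitRunnableSketch class and the pool size are illustrative, not taken from the examples below): a Runnable is submitted, the returned Future is used to wait for completion and surface failures, and the pool is shut down.

import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class SubmitRunnableSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService service = Executors.newFixedThreadPool(2);
        try {
            // submit(Runnable) returns a Future<?> whose get() yields null on normal completion
            Future<?> future = service.submit(() -> System.out.println("task running"));
            try {
                // Blocks until the task finishes; a failure inside the task surfaces as ExecutionException
                future.get();
            } catch (ExecutionException e) {
                System.err.println("task failed: " + e.getCause());
            }
        } finally {
            service.shutdown();
            service.awaitTermination(10, TimeUnit.SECONDS);
        }
    }
}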

Usage

From source file:com.emc.ecs.sync.CasMigrationTest.java

protected List<String> createTestClips(FPPool pool, int maxBlobSize, int thisMany, Writer summaryWriter)
        throws Exception {
    ExecutorService service = Executors.newFixedThreadPool(CAS_THREADS);

    System.out.print("Creating clips");

    List<String> clipIds = Collections.synchronizedList(new ArrayList<String>());
    List<String> summaries = Collections.synchronizedList(new ArrayList<String>());
    for (int clipIdx = 0; clipIdx < thisMany; clipIdx++) {
        service.submit(new ClipWriter(pool, clipIds, maxBlobSize, summaries));
    }

    service.shutdown();
    service.awaitTermination(CAS_SETUP_WAIT_MINUTES, TimeUnit.MINUTES);
    service.shutdownNow();

    Collections.sort(summaries);
    for (String summary : summaries) {
        summaryWriter.append(summary);
    }

    System.out.println();

    return clipIds;
}

From source file:de.unisb.cs.st.javalanche.mutation.runtime.testDriver.MutationTestDriver.java

/**
 * Runs the given test in a new thread with the specified timeout
 * (DEFAULT_TIMEOUT_IN_SECONDS) and stores the results in the given testResult.
 *
 * @param r
 *            the test to be run
 * @return the time needed for executing the test
 */
protected long runWithTimeoutOld(MutationTestRunnable r) {
    // ArrayList<Thread> threadsPre = ThreadUtil.getThreads();
    ExecutorService service = Executors.newSingleThreadExecutor();
    Future<?> future = service.submit(r);
    StopWatch stopWatch = new StopWatch();
    stopWatch.start();
    service.shutdown();
    String exceptionMessage = null;
    Throwable capturedThrowable = null;
    try {
        logger.debug("Start  test: ");
        boolean terminated = service.awaitTermination(timeout, TimeUnit.SECONDS);
        logger.debug("First timeout");
        long time1 = stopWatch.getTime();
        if (!terminated) {
            service.shutdownNow();
        }
        future.get(1, TimeUnit.SECONDS);
        logger.debug("Second timeout");
        long time2 = stopWatch.getTime();
        if (time2 - time1 > 1000) {
            logger.info("Process got some extra time: " + (time2 - time1) + "  " + time2);
        }
        future.cancel(true);

    } catch (InterruptedException e) {
        capturedThrowable = e;
    } catch (ExecutionException e) {
        capturedThrowable = e;
    } catch (TimeoutException e) {
        exceptionMessage = "Mutation causes test timeout";
        capturedThrowable = e;
    } catch (Throwable t) {
        capturedThrowable = t;
    } finally {
        if (capturedThrowable != null) {
            if (exceptionMessage == null) {
                exceptionMessage = "Exception caught during test execution.";
            }
            r.setFailed(exceptionMessage, capturedThrowable);
        }
    }
    if (!future.isDone()) {
        r.setFailed("Mutated Thread is still running after timeout.", null);
        switchOfMutation(future);
    }
    stopWatch.stop();

    if (!r.hasFinished()) {
        shutDown(r, stopWatch);
    }
    logger.debug("End timed test, it took " + stopWatch.getTime() + " ms");
    return stopWatch.getTime();
}

From source file:com.emc.vipr.sync.CasMigrationTest.java

protected List<String> createTestClips(FPPool pool, int maxBlobSize, int thisMany, Writer summaryWriter)
        throws Exception {
    ExecutorService service = Executors.newFixedThreadPool(CAS_SETUP_THREADS);

    System.out.print("Creating clips");

    List<String> clipIds = Collections.synchronizedList(new ArrayList<String>());
    List<String> summaries = Collections.synchronizedList(new ArrayList<String>());
    for (int clipIdx = 0; clipIdx < thisMany; clipIdx++) {
        service.submit(new ClipWriter(pool, clipIds, maxBlobSize, summaries));
    }

    service.shutdown();
    service.awaitTermination(CAS_SETUP_WAIT_MINUTES, TimeUnit.MINUTES);
    service.shutdownNow();

    Collections.sort(summaries);
    for (String summary : summaries) {
        summaryWriter.append(summary);
    }

    System.out.println();

    return clipIds;
}

From source file:gobblin.compaction.mapreduce.MRCompactorJobRunner.java

private void copyDataFiles(final Path outputDirectory, List<Path> inputFilePaths) throws IOException {
    ExecutorService executor = ScalingThreadPoolExecutor.newScalingThreadPool(0,
            this.copyLateDataThreadPoolSize, 100, ExecutorsUtils.newThreadFactory(Optional.of(LOG),
                    Optional.of(this.dataset.getName() + "-copy-data")));

    List<Future<?>> futures = Lists.newArrayList();
    for (final Path filePath : inputFilePaths) {
        Future<Void> future = executor.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                Path convertedFilePath = MRCompactorJobRunner.this.outputRecordCountProvider.convertPath(
                        LateFileRecordCountProvider.restoreFilePath(filePath),
                        MRCompactorJobRunner.this.inputRecordCountProvider);
                String targetFileName = convertedFilePath.getName();
                Path outPath = MRCompactorJobRunner.this.lateOutputRecordCountProvider
                        .constructLateFilePath(targetFileName, MRCompactorJobRunner.this.fs, outputDirectory);
                HadoopUtils.copyPath(MRCompactorJobRunner.this.fs, filePath, MRCompactorJobRunner.this.fs,
                        outPath, true, MRCompactorJobRunner.this.fs.getConf());
                LOG.debug(String.format("Copied %s to %s.", filePath, outPath));
                return null;
            }
        });
        futures.add(future);
    }
    try {
        for (Future<?> future : futures) {
            future.get();
        }
    } catch (ExecutionException | InterruptedException e) {
        throw new IOException("Failed to copy file.", e);
    } finally {
        ExecutorsUtils.shutdownExecutorService(executor, Optional.of(LOG));
    }
}

From source file:com.netflix.curator.framework.recipes.queue.TestDistributedQueue.java

@Test
public void testSafetyWithCrash() throws Exception {
    final int itemQty = 100;

    DistributedQueue<TestQueueItem> producerQueue = null;
    DistributedQueue<TestQueueItem> consumerQueue1 = null;
    DistributedQueue<TestQueueItem> consumerQueue2 = null;

    CuratorFramework producerClient = CuratorFrameworkFactory.newClient(server.getConnectString(),
            new RetryOneTime(1));
    CuratorFramework consumerClient1 = CuratorFrameworkFactory.newClient(server.getConnectString(),
            new RetryOneTime(1));
    CuratorFramework consumerClient2 = CuratorFrameworkFactory.newClient(server.getConnectString(),
            new RetryOneTime(1));
    try {
        producerClient.start();
        consumerClient1.start();
        consumerClient2.start();

        ExecutorService service = Executors.newCachedThreadPool();

        // make the producer queue
        {
            producerQueue = QueueBuilder.builder(producerClient, null, serializer, QUEUE_PATH).buildQueue();
            producerQueue.start();
            QueueTestProducer producer = new QueueTestProducer(producerQueue, itemQty, 0);
            service.submit(producer);
        }

        final Set<TestQueueItem> takenItems = Sets.newTreeSet();
        final Set<TestQueueItem> takenItemsForConsumer1 = Sets.newTreeSet();
        final Set<TestQueueItem> takenItemsForConsumer2 = Sets.newTreeSet();
        final AtomicReference<TestQueueItem> thrownItemFromConsumer1 = new AtomicReference<TestQueueItem>(null);

        // make the first consumer queue
        {
            final QueueConsumer<TestQueueItem> ourQueue = new QueueConsumer<TestQueueItem>() {
                @Override
                public void consumeMessage(TestQueueItem message) throws Exception {
                    synchronized (takenItems) {
                        if (takenItems.size() > 10) {
                            thrownItemFromConsumer1.set(message);
                            throw new Exception("dummy"); // simulate a crash
                        }
                    }

                    addToTakenItems(message, takenItems, itemQty);
                    synchronized (takenItemsForConsumer1) {
                        takenItemsForConsumer1.add(message);
                    }

                    Thread.sleep((long) (Math.random() * 5));
                }

                @Override
                public void stateChanged(CuratorFramework client, ConnectionState newState) {
                }
            };
            consumerQueue1 = QueueBuilder.builder(consumerClient1, ourQueue, serializer, QUEUE_PATH)
                    .lockPath("/a/locks").buildQueue();
            consumerQueue1.start();
        }

        // make the second consumer queue
        {
            final QueueConsumer<TestQueueItem> ourQueue = new QueueConsumer<TestQueueItem>() {
                @Override
                public void consumeMessage(TestQueueItem message) throws Exception {
                    addToTakenItems(message, takenItems, itemQty);
                    synchronized (takenItemsForConsumer2) {
                        takenItemsForConsumer2.add(message);
                    }
                    Thread.sleep((long) (Math.random() * 5));
                }

                @Override
                public void stateChanged(CuratorFramework client, ConnectionState newState) {
                }
            };
            consumerQueue2 = QueueBuilder.builder(consumerClient2, ourQueue, serializer, QUEUE_PATH)
                    .lockPath("/a/locks").buildQueue();
            consumerQueue2.start();
        }

        synchronized (takenItems) {
            while (takenItems.size() < itemQty) {
                takenItems.wait(1000);
            }
        }

        int i = 0;
        for (TestQueueItem item : takenItems) {
            Assert.assertEquals(item.str, Integer.toString(i++));
        }

        Assert.assertNotNull(thrownItemFromConsumer1.get());
        Assert.assertTrue((takenItemsForConsumer2.contains(thrownItemFromConsumer1.get())));
        Assert.assertTrue(Sets.intersection(takenItemsForConsumer1, takenItemsForConsumer2).size() == 0);
    } finally {
        IOUtils.closeQuietly(producerQueue);
        IOUtils.closeQuietly(consumerQueue1);
        IOUtils.closeQuietly(consumerQueue2);

        IOUtils.closeQuietly(producerClient);
        IOUtils.closeQuietly(consumerClient1);
        IOUtils.closeQuietly(consumerClient2);
    }
}

From source file:org.apache.solr.client.solrj.impl.HttpSolrClient.java

/**
 * @lucene.experimental
 */
public HttpUriRequestResponse httpUriRequest(final SolrRequest request, final ResponseParser processor)
        throws SolrServerException, IOException {
    HttpUriRequestResponse mrr = new HttpUriRequestResponse();
    final HttpRequestBase method = createMethod(request, null);
    ExecutorService pool = ExecutorUtil.newMDCAwareFixedThreadPool(1,
            new SolrjNamedThreadFactory("httpUriRequest"));
    try {
        MDC.put("HttpSolrClient.url", baseUrl);
        mrr.future = pool.submit(() -> executeMethod(method, processor));

    } finally {
        pool.shutdown();
        MDC.remove("HttpSolrClient.url");
    }
    assert method != null;
    mrr.httpUriRequest = method;
    return mrr;
}

From source file:com.joliciel.talismane.machineLearning.maxent.custom.GISTrainer.java

private double nextIteration(double correctionConstant) {
    // compute contribution of p(a|b_i) for each feature and the new
    // correction parameter
    double loglikelihood = 0.0;
    int numEvents = 0;
    int numCorrect = 0;

    int numberOfThreads = modelExpects.length;

    ExecutorService executor = Executors.newFixedThreadPool(numberOfThreads);

    int taskSize = numUniqueEvents / numberOfThreads;

    int leftOver = numUniqueEvents % numberOfThreads;

    List<Future<?>> futures = new ArrayList<Future<?>>();

    for (int i = 0; i < numberOfThreads; i++) {
        if (i != numberOfThreads - 1)
            futures.add(executor.submit(new ModelExpactationComputeTask(i, i * taskSize, taskSize)));
        else
            futures.add(executor.submit(new ModelExpactationComputeTask(i, i * taskSize, taskSize + leftOver)));
    }

    for (Future<?> future : futures) {
        ModelExpactationComputeTask finishedTask = null;
        try {
            finishedTask = (ModelExpactationComputeTask) future.get();
        } catch (InterruptedException e) {
            // TODO: We got interrupted, but that is currently not really supported!
            // For now we just print the exception and fail hard. We hopefully soon
            // handle this case properly!
            e.printStackTrace();
            throw new IllegalStateException("Interruption is not supported!", e);
        } catch (ExecutionException e) {
            // Only runtime exception can be thrown during training, if one was thrown
            // it should be re-thrown. That could for example be a NullPointerException
            // which is caused through a bug in our implementation.
            throw new RuntimeException(e.getCause());
        }

        // When they are done, retrieve the results ...
        numEvents += finishedTask.getNumEvents();
        numCorrect += finishedTask.getNumCorrect();
        loglikelihood += finishedTask.getLoglikelihood();
    }

    executor.shutdown();

    display(".");

    // merge the results of the two computations
    for (int pi = 0; pi < numPreds; pi++) {
        int[] activeOutcomes = params[pi].getOutcomes();

        for (int aoi = 0; aoi < activeOutcomes.length; aoi++) {
            for (int i = 1; i < modelExpects.length; i++) {
                modelExpects[0][pi].updateParameter(aoi, modelExpects[i][pi].getParameters()[aoi]);
            }
        }
    }

    display(".");

    // compute the new parameter values
    for (int pi = 0; pi < numPreds; pi++) {
        double[] observed = observedExpects[pi].getParameters();
        double[] model = modelExpects[0][pi].getParameters();
        int[] activeOutcomes = params[pi].getOutcomes();
        for (int aoi = 0; aoi < activeOutcomes.length; aoi++) {
            if (useGaussianSmoothing) {
                params[pi].updateParameter(aoi, gaussianUpdate(pi, aoi, numEvents, correctionConstant));
            } else {
                if (model[aoi] == 0) {
                    LOG.error("Model expects == 0 for " + predLabels[pi] + " " + outcomeLabels[aoi]);
                }
                //params[pi].updateParameter(aoi,(Math.log(observed[aoi]) - Math.log(model[aoi])));
                params[pi].updateParameter(aoi,
                        ((Math.log(observed[aoi]) - Math.log(model[aoi])) / correctionConstant));
            }

            for (int i = 0; i < modelExpects.length; i++)
                modelExpects[i][pi].setParameter(aoi, 0.0); // re-initialize to 0.0's

        }
    }

    display(". loglikelihood=" + loglikelihood + "\t" + ((double) numCorrect / numEvents) + "\n");

    return loglikelihood;
}

From source file:eu.edisonproject.training.wsd.WikipediaOnline.java

private Map<CharSequence, List<CharSequence>> getCategories(Set<Term> terms)
        throws MalformedURLException, InterruptedException, ExecutionException {
    int maxT = 2;
    BlockingQueue<Runnable> workQueue = new ArrayBlockingQueue<>(maxT);
    ExecutorService pool = new ThreadPoolExecutor(maxT, maxT, 500L, TimeUnit.MICROSECONDS, workQueue);

    //        ExecutorService pool = new ThreadPoolExecutor(maxT, maxT,
    //                5000L, TimeUnit.MILLISECONDS,
    //                new ArrayBlockingQueue<>(maxT, true), new ThreadPoolExecutor.CallerRunsPolicy());
    Map<CharSequence, List<CharSequence>> cats = new HashMap<>();
    Set<Future<Map<CharSequence, List<CharSequence>>>> set = new HashSet<>();
    for (Term t : terms) {
        URL url = new URL(PAGE + "?action=query&format=json&prop=categories&pageids=" + t.getUid());
        LOGGER.log(Level.FINE, url.toString());
        WikiRequestor req = new WikiRequestor(url, t.getUid().toString(), 0);
        Future<Map<CharSequence, List<CharSequence>>> future = pool.submit(req);
        set.add(future);
    }
    pool.shutdown();

    for (Future<Map<CharSequence, List<CharSequence>>> future : set) {
        while (!future.isDone()) {
            //                LOGGER.log(Level.INFO, "Task is not completed yet....");
            Thread.sleep(10);
        }
        Map<CharSequence, List<CharSequence>> c = future.get();
        if (c != null) {
            cats.putAll(c);
        }
    }

    return cats;
}

From source file:com.twitter.distributedlog.auditor.DLAuditor.java

/**
 * Find leak ledgers phase 2: collect ledgers from uris.
 */
private Set<Long> collectLedgersFromDL(List<URI> uris, List<List<String>> allocationPaths) throws IOException {
    final Set<Long> ledgers = new TreeSet<Long>();
    List<com.twitter.distributedlog.DistributedLogManagerFactory> factories = new ArrayList<com.twitter.distributedlog.DistributedLogManagerFactory>(
            uris.size());
    try {
        for (URI uri : uris) {
            factories.add(new com.twitter.distributedlog.DistributedLogManagerFactory(conf, uri));
        }
        final CountDownLatch doneLatch = new CountDownLatch(uris.size());
        final AtomicInteger numFailures = new AtomicInteger(0);
        ExecutorService executor = Executors.newFixedThreadPool(uris.size());
        try {
            int i = 0;
            for (com.twitter.distributedlog.DistributedLogManagerFactory factory : factories) {
                final com.twitter.distributedlog.DistributedLogManagerFactory dlFactory = factory;
                final URI uri = uris.get(i);
                final List<String> aps = allocationPaths.get(i);
                i++;
                executor.submit(new Runnable() {
                    @Override
                    public void run() {
                        try {
                            logger.info("Collecting ledgers from {} : {}", uri, aps);
                            collectLedgersFromAllocator(uri, dlFactory, aps, ledgers);
                            synchronized (ledgers) {
                                logger.info("Collected {} ledgers from allocators for {} : {} ",
                                        new Object[] { ledgers.size(), uri, ledgers });
                            }
                            collectLedgersFromDL(uri, dlFactory, ledgers);
                        } catch (IOException e) {
                            numFailures.incrementAndGet();
                            logger.info("Error to collect ledgers from DL : ", e);
                        }
                        doneLatch.countDown();
                    }
                });
            }
            try {
                doneLatch.await();
                if (numFailures.get() > 0) {
                    throw new IOException(numFailures.get() + " errors to collect ledgers from DL");
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                logger.warn("Interrupted on collecting ledgers from DL : ", e);
                throw new DLInterruptedException("Interrupted on collecting ledgers from DL : ", e);
            }
        } finally {
            executor.shutdown();
        }
    } finally {
        for (com.twitter.distributedlog.DistributedLogManagerFactory factory : factories) {
            factory.close();
        }
    }
    return ledgers;
}

From source file:com.scaleoutsoftware.soss.hserver.hadoop.DistributedCacheManager.java

/**
 * Set up the distributed cache by localizing the resources, and updating
 * the configuration with references to the localized resources.
 * @param conf job configuration
 * @throws IOException
 */
public void setup(Configuration conf) throws IOException {
    //If we are not 0th worker, wait for 0th worker to set up the cache
    if (InvocationWorker.getIgWorkerIndex() > 0 && InvocationWorker.getNumberOfWorkers() > 1) {
        try {
            InvocationWorker.getSynchronizationBarrier().waitForComplete(ACTION_NAME, SYNCHRONIZATION_WAIT_MS,
                    WAIT_GRANULARITY_MS);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
        return;
    }

    File workDir = new File(System.getProperty("user.dir"));

    // Generate YARN local resources objects corresponding to the distributed
    // cache configuration
    Map<String, LocalResource> localResources = new LinkedHashMap<String, LocalResource>();
    MRApps.setupDistributedCache(conf, localResources);

    //CODE CHANGE FROM ORIGINAL FILE:
    //We need to clear the resources from jar files, since they are distributed through the IG.
    //
    Iterator<Map.Entry<String, LocalResource>> iterator = localResources.entrySet().iterator();
    while (iterator.hasNext()) {
        Entry<String, LocalResource> entry = iterator.next();
        if (entry.getKey().endsWith(".jar")) {
            iterator.remove();
        }
    }

    // Generating unique numbers for FSDownload.

    AtomicLong uniqueNumberGenerator = new AtomicLong(System.currentTimeMillis());

    // Find which resources are to be put on the local classpath
    Map<String, Path> classpaths = new HashMap<String, Path>();
    Path[] archiveClassPaths = DistributedCache.getArchiveClassPaths(conf);
    if (archiveClassPaths != null) {
        for (Path p : archiveClassPaths) {
            FileSystem remoteFS = p.getFileSystem(conf);
            p = remoteFS.resolvePath(p.makeQualified(remoteFS.getUri(), remoteFS.getWorkingDirectory()));
            classpaths.put(p.toUri().getPath().toString(), p);
        }
    }

    Path[] fileClassPaths = DistributedCache.getFileClassPaths(conf);
    if (fileClassPaths != null) {
        for (Path p : fileClassPaths) {
            FileSystem remoteFS = p.getFileSystem(conf);
            p = remoteFS.resolvePath(p.makeQualified(remoteFS.getUri(), remoteFS.getWorkingDirectory()));
            classpaths.put(p.toUri().getPath().toString(), p);
        }
    }

    // Localize the resources
    LocalDirAllocator localDirAllocator = new LocalDirAllocator(MRConfig.LOCAL_DIR);
    FileContext localFSFileContext = FileContext.getLocalFSFileContext();
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();

    ExecutorService exec = null;
    try {
        ThreadFactory tf = new ThreadFactoryBuilder()
                .setNameFormat("LocalDistributedCacheManager Downloader #%d").build();
        exec = Executors.newCachedThreadPool(tf);
        Path destPath = localDirAllocator.getLocalPathForWrite(".", conf);
        Map<LocalResource, Future<Path>> resourcesToPaths = Maps.newHashMap();
        for (LocalResource resource : localResources.values()) {
            Callable<Path> download = new FSDownload(localFSFileContext, ugi, conf,
                    new Path(destPath, Long.toString(uniqueNumberGenerator.incrementAndGet())), resource);
            Future<Path> future = exec.submit(download);
            resourcesToPaths.put(resource, future);
        }
        for (Entry<String, LocalResource> entry : localResources.entrySet()) {
            LocalResource resource = entry.getValue();
            Path path;
            try {
                path = resourcesToPaths.get(resource).get();
            } catch (InterruptedException e) {
                throw new IOException(e);
            } catch (ExecutionException e) {
                throw new IOException(e);
            }
            String pathString = path.toUri().toString();
            String link = entry.getKey();
            String target = new File(path.toUri()).getPath();
            symlink(workDir, target, link);

            if (resource.getType() == LocalResourceType.ARCHIVE) {
                localArchives.add(pathString);
            } else if (resource.getType() == LocalResourceType.FILE) {
                localFiles.add(pathString);
            } else if (resource.getType() == LocalResourceType.PATTERN) {
                //PATTERN is not currently used in local mode
                throw new IllegalArgumentException(
                        "Resource type PATTERN is not " + "implemented yet. " + resource.getResource());
            }
            Path resourcePath;
            try {
                resourcePath = ConverterUtils.getPathFromYarnURL(resource.getResource());
            } catch (URISyntaxException e) {
                throw new IOException(e);
            }
            LOG.info(String.format("Localized %s as %s", resourcePath, path));
            String cp = resourcePath.toUri().getPath();
            if (classpaths.keySet().contains(cp)) {
                localClasspaths.add(path.toUri().getPath().toString());
            }
        }
    } finally {
        if (exec != null) {
            exec.shutdown();
        }
    }
    // Update the configuration object with localized data.
    if (!localArchives.isEmpty()) {
        conf.set(MRJobConfig.CACHE_LOCALARCHIVES,
                StringUtils.arrayToString(localArchives.toArray(new String[localArchives.size()])));
    }
    if (!localFiles.isEmpty()) {
        conf.set(MRJobConfig.CACHE_LOCALFILES,
                StringUtils.arrayToString(localFiles.toArray(new String[localFiles.size()])));
    }
    setupCalled = true;

    //If we are  0th worker, signal action complete
    if (InvocationWorker.getIgWorkerIndex() == 0 && InvocationWorker.getNumberOfWorkers() > 1) {
        try {
            InvocationWorker.getSynchronizationBarrier().signalComplete(ACTION_NAME);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

}