Example usage for java.util.concurrent ExecutorService awaitTermination

List of usage examples for java.util.concurrent ExecutorService awaitTermination

Introduction

On this page you can find example usages of java.util.concurrent ExecutorService.awaitTermination, drawn from open-source projects.

Prototype

boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException;

Document

Blocks until all tasks have completed execution after a shutdown request, or the timeout occurs, or the current thread is interrupted, whichever happens first.
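
Before the project examples below, here is a minimal, self-contained sketch of the usual idiom (the class and task names are illustrative, not taken from any of the projects): initiate shutdown, then block with a bounded timeout. awaitTermination returns true once the executor has terminated and false if the timeout elapses first; it does not itself initiate shutdown, so without a prior shutdown() call it simply blocks until the timeout runs out.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class AwaitTerminationSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        for (int i = 0; i < 5; i++) {
            final int taskId = i; // the loop variable must be effectively final for the lambda
            pool.execute(() -> System.out.println("task " + taskId));
        }
        pool.shutdown(); // stop accepting new tasks; already-submitted tasks keep running
        if (!pool.awaitTermination(30, TimeUnit.SECONDS)) {
            pool.shutdownNow(); // timed out: cancel queued tasks and interrupt running ones
        }
    }
}

Several of the examples below instead loop on isTerminated() with a short awaitTermination timeout, which is equivalent but lets the caller log or react between waits.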

Usage

From source file:com.bigdata.dastor.service.StorageService.java

/** shuts node off to writes, empties memtables and the commit log. */
public synchronized void drain() throws IOException, InterruptedException, ExecutionException {
    ExecutorService mutationStage = StageManager.getStage(StageManager.MUTATION_STAGE);
    if (mutationStage.isTerminated()) {
        logger_.warn("Cannot drain node (did it already happen?)");
        return;
    }
    setMode("Starting drain process", true);
    Gossiper.instance.stop();
    setMode("Draining: shutting down MessageService", false);
    MessagingService.shutdown();
    setMode("Draining: emptying MessageService pools", false);
    MessagingService.waitFor();

    // Let's flush.
    setMode("Draining: flushing column families", false);
    for (String tableName : DatabaseDescriptor.getNonSystemTables())
        for (Future f : Table.open(tableName).flush())
            f.get();

    setMode("Draining: replaying commit log", false);
    CommitLog.instance().forceNewSegment();
    // want to make sure that any segments deleted as a result of flushing are gone.
    DeletionService.waitFor();
    CommitLog.recover();

    // Commit log recovery just sends work to the mutation stage (there could have been work there already anyway).
    // Either way, we need to let this one drain naturally, and then we're finished.
    setMode("Draining: clearing mutation stage", false);
    mutationStage.shutdown();
    while (!mutationStage.isTerminated())
        mutationStage.awaitTermination(5, TimeUnit.SECONDS);

    setMode("Node is drained", true);
}

From source file:com.testmax.framework.BasePage.java

private void executeRegularTest(BasePage page) throws ConfigDataException {

    threadExit = 0;
    ExecutorService exec = null;
    executeGraph = new CreateGraph("Execution Graph", "Time", "Execution Count");
    responseGraph = new CreateGraph("Response Graph", "Time", "Response Time (ms)");
    elaspedTimeGraph = new CreateGraph("Elapsed Time Graph", "Time", "Elapsed Time (ms)");
    activeThreadGraph = new CreateGraph("Active User Graph", "User Count", "Elapsed Time (ms)");
    if (page.action.equalsIgnoreCase("performance")) {
        exec = setSuitePerformanceExecutor(page);

    } else if (page.action.equalsIgnoreCase("unit")) {
        this.isUnitTest = true;
        setUnitTestSuiteExecutor();

    } else if (page.action.toLowerCase().indexOf("unit@") >= 0) {
        this.isUnitTest = true;
        setUnitTestExecutor();

    } else {
        exec = setUnitPerformanceExecutor(page);
    }
    try {
        if (!this.isUnitTest) {
            exec.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS);
        }

    } catch (InterruptedException ex) {
        // Ignore: interrupted while waiting for the performance executor to finish.
    }

}

From source file:org.apache.hadoop.hbase.util.TestHBaseFsckOneRS.java

/**
 * This test makes sure that parallel instances of Hbck are disabled.
 *
 * @throws Exception
 */
@Test(timeout = 180000)
public void testParallelHbck() throws Exception {
    final ExecutorService service;
    final Future<HBaseFsck> hbck1, hbck2;

    class RunHbck implements Callable<HBaseFsck> {
        boolean fail = true;

        @Override
        public HBaseFsck call() {
            Configuration c = new Configuration(conf);
            c.setInt("hbase.hbck.lockfile.attempts", 1);
            // HBASE-13574 found that in HADOOP-2.6 and later, the file creation would internally retry.
            // To avoid flakiness of the test, set low max wait time.
            c.setInt("hbase.hbck.lockfile.maxwaittime", 3);
            try {
                return doFsck(c, true); // Exclusive hbck only when fixing
            } catch (Exception e) {
                if (e.getMessage().contains("Duplicate hbck")) {
                    fail = false;
                }
            }
            // If we reach here, then an exception was caught
            if (fail)
                fail();
            return null;
        }
    }
    service = Executors.newFixedThreadPool(2);
    hbck1 = service.submit(new RunHbck());
    hbck2 = service.submit(new RunHbck());
    service.shutdown();
    // wait up to 15 seconds for both hbck calls to finish
    service.awaitTermination(15, TimeUnit.SECONDS);
    HBaseFsck h1 = hbck1.get();
    HBaseFsck h2 = hbck2.get();
    // Make sure only one of the calls was successful
    assert (h1 == null || h2 == null);
    if (h1 != null) {
        assert (h1.getRetCode() >= 0);
    }
    if (h2 != null) {
        assert (h2.getRetCode() >= 0);
    }
}

From source file:com.espertech.esper.example.rfidassetzone.LRMovingSimMain.java

private void tryPerf(int numSeconds, int numAssetGroups, int numThreads, int ratioZoneMove,
        int ratioZoneSplit) {
    // Create Asset Ids and assign to a zone
    log.info(".tryPerf Creating asset ids");
    String[][] assetIds = new String[numAssetGroups][3];
    int[][] zoneIds = new int[numAssetGroups][3];
    for (int i = 0; i < numAssetGroups; i++) {
        // Generate unique asset id over all groups
        String assetPrefix = String.format("%010d", i); // 10-digit zero-padded, e.g. 0000000001
        assetIds[i][0] = assetPrefix + "0";
        assetIds[i][1] = assetPrefix + "1";
        assetIds[i][2] = assetPrefix + "2";

        int currentZone = Math.abs(random.nextInt()) % AssetEventGenCallable.NUM_ZONES;
        zoneIds[i][0] = currentZone;
        zoneIds[i][1] = currentZone;
        zoneIds[i][2] = currentZone;
    }

    // Create statements
    log.info(".tryPerf Creating " + numAssetGroups * 2 + " statements for " + numAssetGroups + " asset groups");
    AssetZoneSplitListener listeners[] = new AssetZoneSplitListener[numAssetGroups];
    for (int i = 0; i < numAssetGroups; i++) {
        String streamName = "CountZone_" + i;
        String assetIdList = "'" + assetIds[i][0] + "','" + assetIds[i][1] + "','" + assetIds[i][2] + "'";

        String textOne = "insert into " + streamName + " select " + i + " as groupId, zone, count(*) as cnt "
                + "from LocationReport(assetId in (" + assetIdList + ")).std:unique(assetId) "
                + "group by zone";
        EPStatement stmtOne = epService.getEPAdministrator().createEPL(textOne);
        if (log.isDebugEnabled())
            stmtOne.addListener(new AssetGroupCountListener());//for debugging

        String textTwo = "select * from pattern [" + "  every a=" + streamName + "(cnt in [1:2]) ->"
                + "  (timer:interval(10 sec) and not " + streamName + "(cnt in (0, 3)))]";
        EPStatement stmtTwo = epService.getEPAdministrator().createEPL(textTwo);
        listeners[i] = new AssetZoneSplitListener();
        stmtTwo.addListener(listeners[i]);
    }

    // First, send an event for each asset with its current zone
    log.info(".tryPerf Sending one event for each asset");
    for (int i = 0; i < assetIds.length; i++) {
        for (int j = 0; j < assetIds[i].length; j++) {
            LocationReport report = new LocationReport(assetIds[i][j], zoneIds[i][j]);
            epService.getEPRuntime().sendEvent(report);
        }
    }

    // Reset listeners
    for (int i = 0; i < listeners.length; i++) {
        listeners[i].reset();
    }
    Integer[][] assetGroupsForThread = getGroupsPerThread(numAssetGroups, numThreads);

    // For continuous simulation (ends when interrupted),
    if (continuousSimulation) {
        while (true) {
            AssetEventGenCallable callable = new AssetEventGenCallable(epService, assetIds, zoneIds,
                    assetGroupsForThread[0], ratioZoneMove, ratioZoneSplit);
            try {
                callable.call();
            } catch (Exception ex) {
                log.warn("Exception simulating in continuous mode: " + ex.getMessage(), ex);
                break;
            }
        }
        return;
    }

    // Create threadpool
    log.info(".tryPerf Starting " + numThreads + " threads");
    ExecutorService threadPool = Executors.newFixedThreadPool(numThreads);
    Future future[] = new Future[numThreads];
    AssetEventGenCallable callables[] = new AssetEventGenCallable[numThreads];

    for (int i = 0; i < numThreads; i++) {
        callables[i] = new AssetEventGenCallable(epService, assetIds, zoneIds, assetGroupsForThread[i],
                ratioZoneMove, ratioZoneSplit);
        Future<Boolean> f = threadPool.submit(callables[i]);
        future[i] = f;
    }

    // Run the simulation, reporting statistics once per second
    log.info(".tryPerf Running for " + numSeconds + " seconds");
    long startTime = System.currentTimeMillis();
    long currTime;
    double deltaSeconds;
    int lastTotalEvents = 0;
    do {
        // sleep
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            log.debug("Interrupted", e);
            break;
        }
        currTime = System.currentTimeMillis();
        deltaSeconds = (currTime - startTime) / 1000.0;

        // report statistics
        int totalEvents = 0;
        int totalZoneMoves = 0;
        int totalZoneSplits = 0;
        int totalZoneSame = 0;
        for (int i = 0; i < callables.length; i++) {
            totalEvents += callables[i].getNumEventsSend();
            totalZoneMoves += callables[i].getNumZoneMoves();
            totalZoneSplits += callables[i].getNumZoneSplits();
            totalZoneSame += callables[i].getNumSameZone();
        }
        double throughputOverall = totalEvents / deltaSeconds;
        double totalLastBatch = totalEvents - lastTotalEvents;
        log.info("totalEvents=" + totalEvents + " delta=" + deltaSeconds + " throughputOverall="
                + throughputOverall + " lastBatch=" + totalLastBatch + " zoneMoves=" + totalZoneMoves
                + " zoneSame=" + totalZoneSame + " zoneSplits=" + totalZoneSplits);
        lastTotalEvents = totalEvents;

        // If we are within 15 seconds of shutdown, stop generating zone splits
        if (((numSeconds - deltaSeconds) < 15) && (callables[0].isGenerateZoneSplit())) {
            log.info(".tryPerf Setting stop split flag on threads");
            for (int i = 0; i < callables.length; i++) {
                callables[i].setGenerateZoneSplit(false);
            }
        }
    } while (deltaSeconds < numSeconds);

    log.info(".tryPerf Shutting down threads");
    for (int i = 0; i < callables.length; i++) {
        callables[i].setShutdown(true);
    }
    threadPool.shutdown();
    try {
        threadPool.awaitTermination(10, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        log.debug("Interrupted", e);
    }

    if (!isAssert) {
        return;
    }

    for (int i = 0; i < numThreads; i++) {
        try {
            if (!(Boolean) future[i].get()) {
                throw new RuntimeException("Invalid result of callable");
            }
        } catch (Exception e) {
            log.error("Exception encountered sending events: " + e.getMessage(), e);
        }
    }

    // Get groups split
    Set<Integer> splitGroups = new HashSet<Integer>();
    for (int i = 0; i < callables.length; i++) {
        splitGroups.addAll(callables[i].getSplitZoneGroups());
    }
    log.info(".tryPerf Generated splits were " + splitGroups + " groups");

    // Compare to listeners
    for (Integer groupId : splitGroups) {
        if (listeners[groupId].getCallbacks().size() == 0) {
            throw new RuntimeException("Invalid result for listener, expected split group");
        }
    }
}

From source file:fr.efl.chaine.xslt.GauloisPipe.java

/**
 * Executes the specified pipe on the specified input files, using the
 * specified number of threads.
 *
 * @param pipe the pipe to execute
 * @param inputs the specified input files
 * @param nbThreads the specified number of threads
 * @param listener the listener to start, if not null
 * @return <tt>false</tt> if an error occurs while processing.
 */
private boolean executesPipeOnMultiThread(final Pipe pipe, List<ParametrableFile> inputs, int nbThreads,
        Listener listener) {
    ExecutorService service = (nbThreads == 1) ? Executors.newSingleThreadExecutor(getThreadFactory())
            : Executors.newFixedThreadPool(nbThreads, getThreadFactory());
    // An attempt to work around a multi-thread compilation problem:
    // a pretty dirty hack, but worth a try...
    if (xslCache.isEmpty() && !inputs.isEmpty()) {
        // in the opposite case there is only a listener, and the first
        // file will probably be processed alone...
        try {
            XsltTransformer transformer = buildTransformer(pipe, inputs.get(0).getFile(),
                    inputs.get(0).getFile().toURI().toURL().toExternalForm(),
                    ParametersMerger.merge(inputs.get(0).getParameters(), config.getParams()), messageListener,
                    null);
        } catch (IOException | InvalidSyntaxException | URISyntaxException | SaxonApiException ex) {
            String msg = "while pre-compiling for a multi-thread use...";
            LOGGER.error(msg);
            errors.add(new GauloisRunException(msg, ex));
        }
    }
    for (ParametrableFile pf : inputs) {
        final ParametrableFile fpf = pf;
        Runnable r = new Runnable() {
            @Override
            public void run() {
                try {
                    execute(pipe, fpf, messageListener);
                } catch (SaxonApiException | IOException | InvalidSyntaxException | URISyntaxException ex) {
                    String msg = "[" + instanceName + "] while processing " + fpf.getFile().getName();
                    LOGGER.error(msg, ex);
                    errors.add(new GauloisRunException(msg, fpf.getFile()));
                }
            }
        };
        service.execute(r);
    }
    if (listener == null) {
        // nothing more will be added
        service.shutdown();
        try {
            service.awaitTermination(5, TimeUnit.HOURS);
            return true;
        } catch (InterruptedException ex) {
            LOGGER.error("[" + instanceName + "] multi-thread processing interrupted, 5 hour limit exceed.");
            return false;
        }
    } else {
        ExecutionContext context = new ExecutionContext(this, pipe, messageListener, service);
        final HttpListener httpListener = new HttpListener(listener.getPort(), listener.getStopKeyword(),
                context);
        Runnable runner = new Runnable() {
            @Override
            public void run() {
                httpListener.run();
            }
        };
        new Thread(runner).start();
        return true;
    }
}

From source file:com.microsoft.azure.management.datalake.store.uploader.DataLakeStoreUploader.java

/**
 * Concatenates all the segments defined in the metadata into a single stream.
 *
 * @param metadata The {@link UploadMetadata} to determine the segments to concatenate
 * @throws Exception
 */
private void concatenateSegments(final UploadMetadata metadata) throws Exception {
    final String[] inputPaths = new String[metadata.getSegmentCount()];

    //verify if target stream exists
    if (frontEnd.streamExists(metadata.getTargetStreamPath())) {
        if (this.getParameters().isOverwrite()) {
            frontEnd.deleteStream(metadata.getTargetStreamPath(), false);
        } else {
            throw new OperationsException("Target Stream already exists");
        }
    }

    //ensure all input streams exist and are of the expected length
    //ensure all segments in the metadata are marked as 'complete'
    final List<Exception> exceptions = new ArrayList<>();
    ExecutorService exec = Executors.newFixedThreadPool(this.getParameters().getThreadCount());
    for (int i = 0; i < metadata.getSegmentCount(); i++) {
        final int finalI = i;
        exec.submit(new Runnable() {
            @Override
            public void run() {
                try {
                    if (metadata.getSegments()[finalI].getStatus() != SegmentUploadStatus.Complete) {
                        throw new UploadFailedException(
                                "Cannot perform 'concatenate' operation because not all streams are fully uploaded.");
                    }

                    String remoteStreamPath = metadata.getSegments()[finalI].getPath();
                    int retryCount = 0;
                    long remoteLength = -1;

                    while (retryCount < SingleSegmentUploader.MAX_BUFFER_UPLOAD_ATTEMPT_COUNT) {
                        retryCount++;
                        try {
                            remoteLength = frontEnd.getStreamLength(remoteStreamPath);
                            break;
                        } catch (Exception e) {
                            if (retryCount >= SingleSegmentUploader.MAX_BUFFER_UPLOAD_ATTEMPT_COUNT) {
                                throw new UploadFailedException(MessageFormat.format(
                                        "Cannot perform 'concatenate' operation due to the following exception retrieving file information: {0}",
                                        e));
                            }

                            SingleSegmentUploader.waitForRetry(retryCount,
                                    parameters.isUseSegmentBlockBackOffRetryStrategy());
                        }
                    }

                    if (remoteLength != metadata.getSegments()[finalI].getLength()) {
                        throw new UploadFailedException(MessageFormat.format(
                                "Cannot perform 'concatenate' operation because segment {0} has an incorrect length (expected {1}, actual {2}).",
                                finalI, metadata.getSegments()[finalI].getLength(), remoteLength));
                    }

                    inputPaths[finalI] = remoteStreamPath;

                } catch (Exception ex) {
                    // collect any exceptions, whether we just generated them above or whether they came from the Front End.
                    synchronized (exceptions) {
                        exceptions.add(ex);
                    }
                }
            }
        });
    }

    exec.shutdown();

    try {
        exec.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); // waits ~292 years for completion or interruption.
    } catch (InterruptedException e) {
        // add the exception since it will indicate that it was cancelled.
        exceptions.add(e);
    }

    if (exceptions.size() > 0) {
        throw new AggregateUploadException("At least one concatenate test failed", exceptions.remove(0),
                exceptions);
    }

    //issue the command
    frontEnd.concatenate(metadata.getTargetStreamPath(), inputPaths);
}

From source file:org.kie.workbench.common.services.datamodel.backend.server.ModuleDataModelConcurrencyTest.java

@Test
public void testConcurrentResourceUpdates() throws URISyntaxException {
    final URL pomUrl = this.getClass().getResource("/DataModelBackendTest1/pom.xml");
    final org.uberfire.java.nio.file.Path nioPomPath = ioService.get(pomUrl.toURI());
    final Path pomPath = paths.convert(nioPomPath);

    final URL resourceUrl = this.getClass().getResource("/DataModelBackendTest1/src/main/resources/empty.rdrl");
    final org.uberfire.java.nio.file.Path nioResourcePath = ioService.get(resourceUrl.toURI());
    final Path resourcePath = paths.convert(nioResourcePath);

    //Force full build before attempting incremental changes
    final KieModule module = moduleService.resolveModule(resourcePath);
    final BuildResults buildResults = buildService.build(module);
    assertNotNull(buildResults);
    assertEquals(0, buildResults.getErrorMessages().size());
    assertEquals(1, buildResults.getInformationMessages().size());

    //Perform incremental build
    final int THREADS = 200;
    final Result result = new Result();
    ExecutorService es = Executors.newCachedThreadPool();
    for (int i = 0; i < THREADS; i++) {
        final int operation = (i % 3);

        switch (operation) {
        case 0:
            es.execute(new Runnable() {
                @Override
                public void run() {
                    try {
                        logger.debug("[Thread: " + Thread.currentThread().getName()
                                + "] Request to update POM received");
                        invalidateCaches(module, pomPath);
                        buildChangeListener.updateResource(pomPath);
                        logger.debug("[Thread: " + Thread.currentThread().getName() + "] POM update completed");
                    } catch (Throwable e) {
                        result.setFailed(true);
                        result.setMessage(e.getMessage());
                        ExceptionUtils.printRootCauseStackTrace(e);
                    }
                }
            });
            break;
        case 1:
            es.execute(new Runnable() {
                @Override
                public void run() {
                    try {
                        logger.debug("[Thread: " + Thread.currentThread().getName()
                                + "] Request to update Resource received");
                        invalidateCaches(module, resourcePath);
                        buildChangeListener.addResource(resourcePath);
                        logger.debug(
                                "[Thread: " + Thread.currentThread().getName() + "] Resource update completed");
                    } catch (Throwable e) {
                        result.setFailed(true);
                        result.setMessage(e.getMessage());
                        ExceptionUtils.printRootCauseStackTrace(e);
                    }
                }
            });
            break;
        case 2:
            es.execute(new Runnable() {
                @Override
                public void run() {
                    try {
                        logger.debug("[Thread: " + Thread.currentThread().getName()
                                + "] Request for DataModel received");
                        dataModelService.getDataModel(resourcePath);
                        logger.debug("[Thread: " + Thread.currentThread().getName()
                                + "] DataModel request completed");
                    } catch (Throwable e) {
                        result.setFailed(true);
                        result.setMessage(e.getMessage());
                        ExceptionUtils.printRootCauseStackTrace(e);
                    }
                }
            });
        }
    }

    es.shutdown();
    try {
        es.awaitTermination(5, TimeUnit.MINUTES);
    } catch (InterruptedException e) {
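        // Ignore: any worker failure is surfaced via the 'result' check below.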
    }
    if (result.isFailed()) {
        fail(result.getMessage());
    }
}

From source file:org.kie.workbench.common.services.datamodel.backend.server.ProjectDataModelConcurrencyTest.java

@Test
public void testConcurrentResourceUpdates() throws URISyntaxException {
    final URL pomUrl = this.getClass().getResource("/DataModelBackendTest1/pom.xml");
    final org.uberfire.java.nio.file.Path nioPomPath = ioService.get(pomUrl.toURI());
    final Path pomPath = paths.convert(nioPomPath);

    final URL resourceUrl = this.getClass().getResource("/DataModelBackendTest1/src/main/resources/empty.rdrl");
    final org.uberfire.java.nio.file.Path nioResourcePath = ioService.get(resourceUrl.toURI());
    final Path resourcePath = paths.convert(nioResourcePath);

    //Force full build before attempting incremental changes
    final KieProject project = projectService.resolveProject(resourcePath);
    final BuildResults buildResults = buildService.build(project);
    assertNotNull(buildResults);
    assertEquals(0, buildResults.getErrorMessages().size());
    assertEquals(1, buildResults.getInformationMessages().size());

    //Perform incremental build
    final int THREADS = 200;
    final Result result = new Result();
    ExecutorService es = Executors.newCachedThreadPool();
    for (int i = 0; i < THREADS; i++) {
        final int operation = (i % 3);

        switch (operation) {
        case 0:
            es.execute(new Runnable() {
                @Override
                public void run() {
                    try {
                        logger.debug("[Thread: " + Thread.currentThread().getName()
                                + "] Request to update POM received");
                        invalidateCaches(project, pomPath);
                        buildChangeListener.updateResource(pomPath);
                        logger.debug("[Thread: " + Thread.currentThread().getName() + "] POM update completed");
                    } catch (Throwable e) {
                        result.setFailed(true);
                        result.setMessage(e.getMessage());
                        ExceptionUtils.printRootCauseStackTrace(e);
                    }
                }
            });
            break;
        case 1:
            es.execute(new Runnable() {
                @Override
                public void run() {
                    try {
                        logger.debug("[Thread: " + Thread.currentThread().getName()
                                + "] Request to update Resource received");
                        invalidateCaches(project, resourcePath);
                        buildChangeListener.addResource(resourcePath);
                        logger.debug(
                                "[Thread: " + Thread.currentThread().getName() + "] Resource update completed");
                    } catch (Throwable e) {
                        result.setFailed(true);
                        result.setMessage(e.getMessage());
                        ExceptionUtils.printRootCauseStackTrace(e);
                    }
                }
            });
            break;
        case 2:
            es.execute(new Runnable() {
                @Override
                public void run() {
                    try {
                        logger.debug("[Thread: " + Thread.currentThread().getName()
                                + "] Request for DataModel received");
                        dataModelService.getDataModel(resourcePath);
                        logger.debug("[Thread: " + Thread.currentThread().getName()
                                + "] DataModel request completed");
                    } catch (Throwable e) {
                        result.setFailed(true);
                        result.setMessage(e.getMessage());
                        ExceptionUtils.printRootCauseStackTrace(e);
                    }
                }
            });

        }
    }

    es.shutdown();
    try {
        es.awaitTermination(5, TimeUnit.MINUTES);
    } catch (InterruptedException e) {
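        // Ignore: any worker failure is surfaced via the 'result' check below.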
    }
    if (result.isFailed()) {
        fail(result.getMessage());
    }

}

From source file:org.alfresco.repo.transaction.RetryingTransactionHelperTest.java

@SuppressWarnings("unchecked")
private void runThreads(final RetryingTransactionHelper txnHelper, final List<Throwable> caughtExceptions,
        final Pair<Integer, Integer>... startDurationPairs) {
    ExecutorService executorService = new ThreadPoolExecutor(10, 10, 0L, TimeUnit.MILLISECONDS,
            new ArrayBlockingQueue<Runnable>(10));

    class Work implements Runnable {
        private final CountDownLatch startLatch;
        private final long endTime;

        public Work(CountDownLatch startLatch, long endTime) {
            this.startLatch = startLatch;
            this.endTime = endTime;
        }

        public void run() {
            try {
                txnHelper.doInTransaction(new RetryingTransactionCallback<Void>() {

                    public Void execute() throws Throwable {
                        // Signal that we've started
                        startLatch.countDown();

                        long duration = endTime - System.currentTimeMillis();
                        if (duration > 0) {
                            Thread.sleep(duration);
                        }
                        return null;
                    }
                });
            } catch (Throwable e) {
                caughtExceptions.add(e);
                // We never got a chance to signal we had started so do it now
                if (startLatch.getCount() > 0) {
                    startLatch.countDown();
                }
            }
        }
    }

    // Schedule the transactions at their required start times
    long startTime = System.currentTimeMillis();
    long currentStart = 0;
    for (Pair<Integer, Integer> pair : startDurationPairs) {
        int start = pair.getFirst();
        long now = System.currentTimeMillis();
        long then = startTime + start;
        if (then > now) {
            try {
                Thread.sleep(then - now);
            } catch (InterruptedException e) {
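                // Ignore: schedule the work without waiting any longer.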
            }
            currentStart = start;
        }
        CountDownLatch startLatch = new CountDownLatch(1);
        Runnable work = new Work(startLatch, startTime + currentStart + pair.getSecond());
        executorService.execute(work);
        try {
            // Wait for the thread to get up and running. We need them starting in sequence
            startLatch.await(60, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
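            // Ignore: carry on and schedule the next transaction.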
        }
    }
    // Wait for the threads to have finished
    executorService.shutdown();
    try {
        executorService.awaitTermination(60, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
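        // Ignore: callers inspect caughtExceptions for failures.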
    }

}

From source file:org.elasticsearch.client.sniff.SnifferTests.java

/**
 * Test multiple sniffing rounds by mocking the {@link Scheduler} as well as the {@link HostsSniffer}.
 * Simulates the ordinary behaviour of {@link Sniffer} when sniffing on failure is not enabled.
 * The {@link CountingHostsSniffer} doesn't make any network connection, but it may throw an exception or return no hosts, which makes
 * it possible to verify that errors are properly handled and don't affect subsequent runs and their scheduling.
 * The {@link Scheduler} implementation submits tasks rather than scheduling them, meaning that it doesn't respect the requested sniff
 * delays, while still allowing the test to assert that the delays requested for each run and the following one are the expected values.
 */
public void testOrdinarySniffRounds() throws Exception {
    final long sniffInterval = randomLongBetween(1, Long.MAX_VALUE);
    long sniffAfterFailureDelay = randomLongBetween(1, Long.MAX_VALUE);
    RestClient restClient = mock(RestClient.class);
    CountingHostsSniffer hostsSniffer = new CountingHostsSniffer();
    final int iters = randomIntBetween(30, 100);
    final Set<Future<?>> futures = new CopyOnWriteArraySet<>();
    final CountDownLatch completionLatch = new CountDownLatch(1);
    final AtomicInteger runs = new AtomicInteger(iters);
    final ExecutorService executor = Executors.newSingleThreadExecutor();
    final AtomicReference<Future<?>> lastFuture = new AtomicReference<>();
    final AtomicReference<Sniffer.Task> lastTask = new AtomicReference<>();
    Scheduler scheduler = new Scheduler() {
        @Override
        public Future<?> schedule(Sniffer.Task task, long delayMillis) {
            assertEquals(sniffInterval, task.nextTaskDelay);
            int numberOfRuns = runs.getAndDecrement();
            if (numberOfRuns == iters) {
                //the first call is to schedule the first sniff round from the Sniffer constructor, with delay 0
                assertEquals(0L, delayMillis);
                assertEquals(sniffInterval, task.nextTaskDelay);
            } else {
                //all of the subsequent times "schedule" is called with delay set to the configured sniff interval
                assertEquals(sniffInterval, delayMillis);
                assertEquals(sniffInterval, task.nextTaskDelay);
                if (numberOfRuns == 0) {
                    completionLatch.countDown();
                    return null;
                }
            }
            //we submit rather than scheduling to make the test quick and not depend on time
            Future<?> future = executor.submit(task);
            futures.add(future);
            if (numberOfRuns == 1) {
                lastFuture.set(future);
                lastTask.set(task);
            }
            return future;
        }

        @Override
        public void shutdown() {
            //the executor is closed externally, shutdown is tested separately
        }
    };
    try {
        new Sniffer(restClient, hostsSniffer, scheduler, sniffInterval, sniffAfterFailureDelay);
        assertTrue("timeout waiting for sniffing rounds to be completed",
                completionLatch.await(1000, TimeUnit.MILLISECONDS));
        assertEquals(iters, futures.size());
        //the last future is the only one that may not be completed yet, as the count down happens
        //while scheduling the next round which is still part of the execution of the runnable itself.
        assertTrue(lastTask.get().hasStarted());
        lastFuture.get().get();
        for (Future<?> future : futures) {
            assertTrue(future.isDone());
            future.get();
        }
    } finally {
        executor.shutdown();
        assertTrue(executor.awaitTermination(1000, TimeUnit.MILLISECONDS));
    }
    int totalRuns = hostsSniffer.runs.get();
    assertEquals(iters, totalRuns);
    int setHostsRuns = totalRuns - hostsSniffer.failures.get() - hostsSniffer.emptyList.get();
    verify(restClient, times(setHostsRuns)).setHosts(Matchers.<HttpHost>anyVararg());
    verifyNoMoreInteractions(restClient);
}