Example usage for java.util.concurrent ExecutorService awaitTermination

Introduction

On this page you can find usage examples for java.util.concurrent ExecutorService awaitTermination, drawn from open-source projects.

Prototype

boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException;

Document

Blocks until all tasks have completed execution after a shutdown request, or the timeout occurs, or the current thread is interrupted, whichever happens first.
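
A minimal, self-contained sketch of the usual pattern follows: call shutdown() to stop accepting new tasks, then block on awaitTermination() and fall back to shutdownNow() if the timeout elapses. The pool size, task count, and timeout below are arbitrary illustration values, not taken from any of the projects listed under Usage.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class AwaitTerminationSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        for (int i = 0; i < 10; i++) {
            final int taskId = i;
            pool.submit(() -> System.out.println("task " + taskId + " finished"));
        }

        // Stop accepting new tasks; already submitted tasks keep running.
        pool.shutdown();

        // Block until all tasks complete, the timeout elapses, or this thread is interrupted.
        if (!pool.awaitTermination(30, TimeUnit.SECONDS)) {
            // Timed out: cancel tasks that are still running.
            pool.shutdownNow();
        }
    }
}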

Usage

From source file:org.apache.hadoop.yarn.util.TestFSDownload.java
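
This Hadoop test submits five FSDownload tasks to a single-threaded executor, shuts it down, and polls awaitTermination in a loop until the pool terminates before asserting that every download future is done.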

@Test(timeout = 10000)
public void testDirDownload() throws IOException, InterruptedException {
    Configuration conf = new Configuration();
    FileContext files = FileContext.getLocalFSFileContext(conf);
    final Path basedir = files.makeQualified(new Path("target", TestFSDownload.class.getSimpleName()));
    files.mkdir(basedir, null, true);
    conf.setStrings(TestFSDownload.class.getName(), basedir.toString());

    Map<LocalResource, LocalResourceVisibility> rsrcVis = new HashMap<LocalResource, LocalResourceVisibility>();

    Random rand = new Random();
    long sharedSeed = rand.nextLong();
    rand.setSeed(sharedSeed);
    System.out.println("SEED: " + sharedSeed);

    Map<LocalResource, Future<Path>> pending = new HashMap<LocalResource, Future<Path>>();
    ExecutorService exec = Executors.newSingleThreadExecutor();
    LocalDirAllocator dirs = new LocalDirAllocator(TestFSDownload.class.getName());
    for (int i = 0; i < 5; ++i) {
        LocalResourceVisibility vis = LocalResourceVisibility.PRIVATE;
        if (i % 2 == 1) {
            vis = LocalResourceVisibility.APPLICATION;
        }

        Path p = new Path(basedir, "dir" + i + ".jar");
        LocalResource rsrc = createJar(files, p, vis);
        rsrcVis.put(rsrc, vis);
        Path destPath = dirs.getLocalPathForWrite(basedir.toString(), conf);
        destPath = new Path(destPath, Long.toString(uniqueNumberGenerator.incrementAndGet()));
        FSDownload fsd = new FSDownload(files, UserGroupInformation.getCurrentUser(), conf, destPath, rsrc);
        pending.put(rsrc, exec.submit(fsd));
    }

    exec.shutdown();
    while (!exec.awaitTermination(1000, TimeUnit.MILLISECONDS))
        ;
    for (Future<Path> path : pending.values()) {
        Assert.assertTrue(path.isDone());
    }

    try {

        for (Map.Entry<LocalResource, Future<Path>> p : pending.entrySet()) {
            Path localized = p.getValue().get();
            FileStatus status = files.getFileStatus(localized);

            System.out.println("Testing path " + localized);
            assert (status.isDirectory());
            assert (rsrcVis.containsKey(p.getKey()));

            verifyPermsRecursively(localized.getFileSystem(conf), files, localized, rsrcVis.get(p.getKey()));
        }
    } catch (ExecutionException e) {
        throw new IOException("Failed exec", e);
    }
}

From source file:org.zlogic.voidreader.feed.Feed.java
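
Here each new feed item is handled on a fixed-size pool; after shutdown, awaitTermination waits up to maxRunSeconds (or effectively without limit when maxRunSeconds is not positive), and a false return is turned into a TimeoutException.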

/**
 * Handles downloaded feed entries.
 *
 * @param entries the downloaded entries
 * @param handler the feed item handler
 * @param cacheExpiryDate the date after which feed items expire and can be
 * removed
 * @param maxRunSeconds the maximum time the application can run before being
 * forcefully terminated
 * @throws IOException if FeedItem constructor fails (e.g. unable to
 * generate HTML based on the template)
 * @throws TimeoutException if the task took too long to complete
 */
private void handleEntries(List<Object> entries, FeedItemHandler handler, Date cacheExpiryDate,
        int maxRunSeconds) throws IOException, TimeoutException {
    if (items == null)
        items = new TreeSet<>();
    Set<FeedItem> newItems = new TreeSet<>();
    for (Object obj : entries)
        if (obj instanceof SyndEntry)
            newItems.add(new FeedItem(this, (SyndEntry) obj));

    //Find outdated items
    for (FeedItem oldItem : new TreeSet<>(items))
        if (!newItems.contains(oldItem) && oldItem.getLastSeen() != null
                && oldItem.getLastSeen().before(cacheExpiryDate)) {
            items.remove(oldItem);
        } else if (newItems.contains(oldItem)) {
            for (FeedItem newItem : newItems)
                if (newItem.equals(oldItem))
                    newItem.setState(oldItem.getState());//Transfer state to new item
            if (oldItem.getState() != FeedItem.State.SENT_PDF)
                items.remove(oldItem);//Replace with new item to resend pdf
            else
                oldItem.updateLastSeen();
        }

    // Ignore already existing items
    newItems.removeAll(items);

    //Add new items
    items.addAll(newItems);
    ExecutorService executor = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());//TODO: make this configurable
    for (FeedItem item : newItems) {
        executor.submit(new Runnable() {
            private FeedItemHandler handler;
            private Feed feed;
            private FeedItem item;

            public Runnable setParameters(FeedItemHandler handler, Feed feed, FeedItem item) {
                this.handler = handler;
                this.feed = feed;
                this.item = item;
                return this;
            }

            @Override
            public void run() {
                try {
                    handler.handle(feed, item);
                } catch (RuntimeException ex) {
                    log.log(Level.SEVERE, MessageFormat.format(messages.getString("ERROR_HANDLING_FEED_ITEM"),
                            new Object[] { item }), ex);
                    synchronized (items) {
                        items.remove(item);
                    }
                }
            }
        }.setParameters(handler, this, item));
    }
    executor.shutdown();
    try {
        if (!executor.awaitTermination(maxRunSeconds > 0 ? maxRunSeconds : Long.MAX_VALUE, TimeUnit.SECONDS)) {
            throw new TimeoutException(messages.getString("TIMED_OUT_WAITING_FOR_EXECUTOR"));
        }
    } catch (InterruptedException ex) {
        throw new RuntimeException(ex);
    }
}

From source file:broadwick.Broadwick.java
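
Broadwick runs each registered model on its own thread, shuts the pool down, and keeps calling awaitTermination with a 10-second timeout until isTerminated reports that all models have finished.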

/**
 * Run the Broadwick framework.
 */
@SuppressWarnings("squid:S1147")
public void run() {
    if (project != null) {
        final StopWatch sw = new StopWatch();
        sw.start();

        // initialise the data, by reading the data files and/or the database.
        log.info("Running broadwick {}", BroadwickVersion.getVersionAndTimeStamp());

        try (DataReader dr = new DataReader(project.getData())) {
            final Map<String, Model> registeredModels = registerModels(project, dr.getLookup());
            log.info("Running broadwick for the following models {}", registeredModels.keySet());

            // Run the models, each on a separate thread.
            // TODO in a single-threaded grid environment we cannot do this - need to think again here....
            final int poolSize = registeredModels.size();
            if (poolSize > 0) {
                final ThreadFactory threadFactory = new ThreadFactoryBuilder()
                        .setNameFormat("BroadwickModels-%d").setDaemon(true).build();
                final ExecutorService es = Executors.newFixedThreadPool(poolSize, threadFactory);

                //final StopWatch sw = new StopWatch();
                for (final Entry<String, Model> entry : registeredModels.entrySet()) {
                    es.submit(new Runnable() {
                        @Override
                        public void run() {
                            final String modelName = entry.getKey();
                            final Model model = entry.getValue();
                            try {
                                log.info("Running {} [{}]", modelName, model.getClass().getCanonicalName());
                                model.init();
                                model.run();
                                model.finalise();
                            } catch (Exception ex) {
                                log.error("Error running model {}. see stack trace from details.", modelName);
                                log.error("{}", Throwables.getStackTraceAsString(ex));
                            }
                        }
                    });
                }
                es.shutdown();
                while (!es.isTerminated()) {
                    es.awaitTermination(10, TimeUnit.SECONDS);
                }
                //sw.stop();
                //log.trace("Finished {} simulations in {}.", maxSimulations, sw);
            }
        } catch (Exception ex) {
            log.error("{}", ex.getLocalizedMessage());
            log.error("{}", Throwables.getStackTraceAsString(ex));
            log.error("Something went wrong. See previous messages for details.");
        }

        log.info("Simulation complete. {}", sw.toString());
        // In rare circumstances, where exceptions are caught and the simulation has completed but
        // there are still tasks being submitted to the executor, we need to force the program to quit.
        Runtime.getRuntime().exit(0);
    }
}

From source file:org.geoserver.bkprst.BackupTask.java
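
When a halt is requested during the backup copy, the task shuts the pool down and treats a false return from awaitTermination(5, TimeUnit.SECONDS) as a failure to stop the backup.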

@Override
public void run() {

    // Sets up the filter to exclude some directories according to the previous backup info
    IOFileFilter excludeFilter = this.getExcludeFilter(this.includeData, this.includeGwc, this.includeLog);

    // Sets up source and destination
    File srcMount = this.dataRoot.root();
    File trgMount = new File(this.path);

    // Sets transaction
    this.trans = new BackupTransaction(this, srcMount, trgMount, excludeFilter);

    try {
        // Deletes dest directory if existing
        if (trgMount.exists()) {
            Remove.deleteDirectory(trgMount,
                    FileFilterUtils.or(FileFilterUtils.directoryFileFilter(), FileFilterUtils.fileFileFilter()),
                    true, true);
        }

        // Starts transaction
        this.trans.start();
        if (checkForHalt()) {
            LOGGER.fine("run:Halt requested " + this.id);
            return;
        }

        // Sets up the copy task
        ExecutorService ex = Executors.newFixedThreadPool(2);
        if (ex == null || ex.isTerminated()) {
            throw new IllegalArgumentException(
                    "Unable to run asynchronously using a terminated or null ThreadPoolExecutor");
        }
        ExecutorCompletionService<File> cs = new ExecutorCompletionService<File>(ex);

        this.act = new CopyTree(excludeFilter, cs, srcMount, trgMount);
        this.act.addCopyListener(new DefaultProgress(this.id.toString()) {
            public void onUpdateProgress(float percent) {
                super.onUpdateProgress(percent);
                progress = percent;
            }
        });

        // Starts backup
        int workSize = this.act.copy();

        // This is to keep track of backup advancement
        while (workSize-- > 0) {
            Future<File> future;
            try {
                future = cs.take();
                LOGGER.info("copied file: " + future.get());
            } catch (Exception e) {
                LOGGER.log(Level.INFO, e.getLocalizedMessage(), e);
            }

            if (checkForHalt()) {
                LOGGER.fine("run:Halt requested, shutting down threads " + this.id);
                ex.shutdown();
                if (!ex.awaitTermination(5, TimeUnit.SECONDS)) {
                    throw new RuntimeException("Unable to stop backup task");
                }
                return;
            }
        }

        // Writes info about backup
        if (!this.writeBackupInfo(this.path)) {
            LOGGER.severe(
                    "Backup data info were not written properly, a restore operation will fail on this data");
            this.state = BrTaskState.FAILED;
        }

        if (checkForHalt()) {
            LOGGER.fine("run:Halt requested " + this.id);
            return;
        }
        // Backup completed
        this.trans.commit();

    } catch (Exception e) {
        LOGGER.log(Level.SEVERE, e.getLocalizedMessage(), e);
        // In case of errors, rollbacks
        this.trans.rollback();
    } finally {
        haltSemaphore.release();
    }
}

From source file:io.druid.data.input.impl.prefetch.PrefetchableTextFilesFirehoseFactory.java
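
The firehose's close callback shuts the fetch executor down with shutdownNow and uses Preconditions.checkState to require that awaitTermination completes within fetchTimeout milliseconds.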

@Override
public Firehose connect(StringInputRowParser firehoseParser, File temporaryDirectory) throws IOException {
    if (!cacheManager.isEnabled() && maxFetchCapacityBytes == 0) {
        return super.connect(firehoseParser, temporaryDirectory);
    }

    if (objects == null) {
        objects = ImmutableList.copyOf(Preconditions.checkNotNull(initObjects(), "objects"));
    }

    Preconditions.checkState(temporaryDirectory.exists(), "temporaryDirectory[%s] does not exist",
            temporaryDirectory);
    Preconditions.checkState(temporaryDirectory.isDirectory(), "temporaryDirectory[%s] is not a directory",
            temporaryDirectory);

    LOG.info("Create a new firehose for [%d] objects", objects.size());

    // fetchExecutor is responsible for background data fetching
    final ExecutorService fetchExecutor = Execs.singleThreaded("firehose_fetch_%d");
    final Fetcher<T> fetcher = new Fetcher<>(cacheManager, objects, fetchExecutor, temporaryDirectory,
            maxFetchCapacityBytes, prefetchTriggerBytes, fetchTimeout, maxFetchRetry, this::openObjectStream);

    return new FileIteratingFirehose(new Iterator<LineIterator>() {
        @Override
        public boolean hasNext() {
            return fetcher.hasNext();
        }

        @Override
        public LineIterator next() {
            if (!hasNext()) {
                throw new NoSuchElementException();
            }

            final OpenedObject<T> openedObject = fetcher.next();
            final InputStream stream;
            try {
                stream = wrapObjectStream(openedObject.getObject(), openedObject.getObjectStream());
            } catch (IOException e) {
                throw new RuntimeException(e);
            }

            return new ResourceCloseableLineIterator(new InputStreamReader(stream, StandardCharsets.UTF_8),
                    openedObject.getResourceCloser());
        }
    }, firehoseParser, () -> {
        fetchExecutor.shutdownNow();
        try {
            Preconditions.checkState(fetchExecutor.awaitTermination(fetchTimeout, TimeUnit.MILLISECONDS));
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new ISE("Failed to shutdown fetch executor during close");
        }
    });
}

From source file:io.hops.security.TestUsersGroups.java
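
This test runs 100 concurrent AddUser callables via invokeAll, then shuts the pool down and gives it up to 10 seconds to terminate before inspecting the futures.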

public void testConcurrentAddUser(int cacheTime, int cacheSize) throws Exception {
    Configuration conf = new Configuration();
    conf.set(CommonConfigurationKeys.HOPS_UG_CACHE_SECS, Integer.toString(cacheTime));
    conf.set(CommonConfigurationKeys.HOPS_UG_CACHE_SIZE, Integer.toString(cacheSize));
    HdfsStorageFactory.resetDALInitialized();
    HdfsStorageFactory.setConfiguration(conf);
    HdfsStorageFactory.formatStorage();
    UsersGroups.createSyncRow();

    final String userName = "user1";
    final String groupNmae = "group1";
    final int CONCURRENT_USERS = 100;
    ExecutorService executorService = Executors.newFixedThreadPool(CONCURRENT_USERS);

    List<Callable<Integer>> callables = new ArrayList<>();
    for (int i = 0; i < CONCURRENT_USERS; i++) {
        callables.add(new AddUser(userName, groupNmae));
    }

    List<Future<Integer>> futures = executorService.invokeAll(callables);
    executorService.shutdown();
    executorService.awaitTermination(10, TimeUnit.SECONDS);

    UsersGroups.clearCache();

    for (Future<Integer> f : futures) {
        try {
            f.get();
        } catch (ExecutionException ex) {
            ex.printStackTrace();
            fail();
        }
    }
}

From source file:com.tasktop.c2c.server.ssh.server.commands.AbstractInteractiveProxyCommand.java
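
The proxy command waits for its pipe tasks twice: once after shutdown with a one-second timeout, and again after shutdownNow with a three-second timeout, ignoring interruption in both places.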

protected void performCommand(Environment env, ProjectService service, String projectId, String path,
        String requestPath, RequestHeadersSupport headers) throws CommandException {
    String internalProxyUri = service.computeInternalProxyBaseUri(false);
    if (internalProxyUri == null) {
        throw new IllegalStateException();
    }
    URI targetUri;
    try {
        if (!internalProxyUri.endsWith("/")) {
            internalProxyUri += "/";
        }
        internalProxyUri += getName() + '/' + path;

        targetUri = new URI(internalProxyUri);
    } catch (URISyntaxException e) {
        throw new RuntimeException(e);
    }
    String host = targetUri.getHost();
    int port = targetUri.getPort();
    if (port < 0) {
        port = 80;
    }
    if (targetUri.getScheme() == null || !targetUri.getScheme().equalsIgnoreCase("http")) {
        throw new IllegalStateException("scheme " + targetUri.getScheme() + " is not supported");
    }
    HeaderGroup headerGroup = computeHeaders(targetUri);
    for (Entry<String, List<String>> headerEntry : headers.getRequestHeaders().entrySet()) {
        for (String value : headerEntry.getValue()) {
            headerGroup.addHeader(new Header(headerEntry.getKey(), value));
        }
    }
    getLogger().info("Proxying " + getName() + " to " + targetUri);
    try {
        Socket socket = socketFactory.openConnection(host, port);
        try {
            // initiate an HTTP request with Transfer-Encoding: chunked
            OutputStream proxyOut = socket.getOutputStream();
            emitHttpRequestLine(proxyOut, targetUri);
            emitHeaders(proxyOut, headerGroup);

            proxyOut.flush();

            List<Callable<Void>> tasks = new ArrayList<Callable<Void>>(3);
            FlushingChunkedOutputStream chunkedRequestOut = new FlushingChunkedOutputStream(proxyOut);
            tasks.add(new InputPipe(in, chunkedRequestOut, bufferSize, Thread.currentThread()).flush(true));

            // start these pipes
            ExecutorService executor = Executors.newFixedThreadPool(tasks.size());
            try {
                for (Callable<Void> task : tasks) {
                    executor.submit(task);
                }

                InputStream proxyInput = socket.getInputStream();
                try {
                    readHttpResponse(proxyInput);
                    MultiplexingInputStream input = new MultiplexingInputStream(
                            new ChunkedInputStream(proxyInput));
                    for (;;) {
                        PacketType packetType = input.getPacketType();
                        if (packetType == null) {
                            break;
                        }
                        int length = input.getPacketLength();

                        processData(input, packetType, length);
                    }
                } finally {
                    try {
                        executor.shutdown();
                        executor.awaitTermination(1000L, TimeUnit.MILLISECONDS);
                    } catch (InterruptedException e) {
                        // ignore
                    }
                }
            } finally {
                executor.shutdownNow();
                try {
                    executor.awaitTermination(3000L, TimeUnit.MILLISECONDS);
                } catch (InterruptedException e) {
                    // ignore
                }
                Thread.interrupted();

                try {
                    // attempt to close the chunked output, since this will make us a well-behaved client
                    // by sending the closing chunk.
                    chunkedRequestOut.close();
                } catch (Throwable t) {
                    // ignore
                }
            }
        } finally {
            socket.close();
        }
    } catch (ConnectException e) {
        getLogger().error(e.getMessage(), e);
        throw new CommandException(-1, "Service temporarily unavailable");
    } catch (IOException e) {
        getLogger().warn(e.getMessage(), e);
        throw new CommandException(-1, e.getMessage());
    }
}

From source file:org.geoserver.bkprst.RestoreTask.java
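
The restore task mirrors the backup task above: on a halt request it shuts the copy pool down and raises a RuntimeException if awaitTermination(5, TimeUnit.SECONDS) returns false.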

@Override
public void run() {

    // If the previous backup's info cannot be read, aborts the restore
    // Reads info about the previous backup from a file
    BackupTask backupInfo = this.readBackupInfo(this.path);
    if (backupInfo == null) {
        LOGGER.severe("Backup data info were not written properly, the restore will not start");
        this.state = BrTaskState.FAILED;
        return;
    }

    // Sets up the filter to exclude some directories according to the previous backup info
    IOFileFilter excludeFilter = this.getExcludeFilter(backupInfo.includeData, backupInfo.includeGwc,
            backupInfo.includeLog);

    // Sets up source and destination
    File srcMount = new File(this.path);
    File trgMount = this.dataRoot.root();

    // Sets transaction
    this.trans = new RestoreTransaction(this, srcMount, trgMount, excludeFilter);

    try {
        // Start transaction
        this.trans.start();
        if (checkForHalt()) {
            return;
        }

        // Sets up the copy task
        ExecutorService ex = Executors.newFixedThreadPool(2);
        if (ex == null || ex.isTerminated()) {
            throw new IllegalArgumentException(
                    "Unable to run asynchronously using a terminated or null ThreadPoolExecutor");
        }
        ExecutorCompletionService<File> cs = new ExecutorCompletionService<File>(ex);

        this.act = new CopyTree(excludeFilter, cs, srcMount, trgMount);
        this.act.addCopyListener(new DefaultProgress(this.id.toString()) {
            public void onUpdateProgress(float percent) {
                super.onUpdateProgress(percent);
                progress = percent;
            }
        });

        // Starts restore
        int workSize = this.act.copy();
        LOGGER.info("Restore " + this.id + " has started");
        this.startTime = new Date();
        this.state = BrTaskState.RUNNING;

        // This is to keep track of restore advancement
        while (workSize-- > 0) {
            Future<File> future = cs.take();
            try {
                LOGGER.info("copied file: " + future.get());
            } catch (ExecutionException e) {

                LOGGER.log(Level.INFO, e.getLocalizedMessage(), e);
            }
            if (checkForHalt()) {
                ex.shutdown();
                if (!ex.awaitTermination(5, TimeUnit.SECONDS)) {
                    throw new RuntimeException("Unable to stop backup task");
                }
                return;
            }
        }

        // Restore completed
        this.trans.commit();

        // reload the config from disk
        getGeoServer().reload();
    } catch (Exception e) {
        LOGGER.log(Level.SEVERE, e.getLocalizedMessage(), e);

        // In case of errors, rollback
        this.trans.rollback();
    } finally {
        haltSemaphore.release();
    }
}

From source file:com.linkedin.pinot.integration.tests.HybridClusterScanComparisonIntegrationTest.java
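
Segment creation runs on an executor that is shut down and then given up to 10 minutes via awaitTermination before the generated segments are uploaded.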

@Override
@BeforeClass
public void setUp() throws Exception {
    //Clean up
    ensureDirectoryExistsAndIsEmpty(_tmpDir);
    ensureDirectoryExistsAndIsEmpty(_segmentDir);
    ensureDirectoryExistsAndIsEmpty(_offlineSegmentDir);
    ensureDirectoryExistsAndIsEmpty(_realtimeSegmentDir);
    ensureDirectoryExistsAndIsEmpty(_offlineTarDir);
    ensureDirectoryExistsAndIsEmpty(_realtimeTarDir);
    ensureDirectoryExistsAndIsEmpty(_unpackedSegments);

    // Start Zk, Kafka and Pinot
    startHybridCluster();

    extractAvroIfNeeded();

    int avroFileCount = getAvroFileCount();
    Preconditions.checkArgument(3 <= avroFileCount, "Need at least three Avro files for this test");

    setSegmentCount(avroFileCount);
    setOfflineSegmentCount(2);
    setRealtimeSegmentCount(avroFileCount - 1);

    final List<File> avroFiles = getAllAvroFiles();

    _schemaFile = getSchemaFile();
    _schema = Schema.fromFile(_schemaFile);

    // Create Pinot table
    setUpTable("mytable", getTimeColumnName(), getTimeColumnType(), KafkaStarterUtils.DEFAULT_ZK_STR,
            KAFKA_TOPIC, _schemaFile, avroFiles.get(0), getSortedColumn(), invertedIndexColumns);

    final List<File> offlineAvroFiles = getOfflineAvroFiles(avroFiles);
    final List<File> realtimeAvroFiles = getRealtimeAvroFiles(avroFiles);

    // Create segments from Avro data
    ExecutorService executor;
    if (_createSegmentsInParallel) {
        executor = Executors.newCachedThreadPool();
    } else {
        executor = Executors.newSingleThreadExecutor();
    }
    Future<Map<File, File>> offlineAvroToSegmentMapFuture = buildSegmentsFromAvro(offlineAvroFiles, executor, 0,
            _offlineSegmentDir, _offlineTarDir, "mytable", false, _schema);
    Future<Map<File, File>> realtimeAvroToSegmentMapFuture = buildSegmentsFromAvro(realtimeAvroFiles, executor,
            0, _realtimeSegmentDir, _realtimeTarDir, "mytable", false, _schema);

    // Initialize query generator
    setupQueryGenerator(avroFiles, executor);

    // Redeem futures
    _offlineAvroToSegmentMap = offlineAvroToSegmentMapFuture.get();
    _realtimeAvroToSegmentMap = realtimeAvroToSegmentMapFuture.get();

    LOGGER.info("Offline avro to segment map: {}", _offlineAvroToSegmentMap);
    LOGGER.info("Realtime avro to segment map: {}", _realtimeAvroToSegmentMap);

    executor.shutdown();
    executor.awaitTermination(10, TimeUnit.MINUTES);

    // Set up a Helix spectator to count the number of segments that are uploaded and unlock the latch once 12 segments are online
    final CountDownLatch latch = setupSegmentCountCountDownLatch("mytable", getOfflineSegmentCount());

    // Upload the offline segments
    int i = 0;
    for (String segmentName : _offlineTarDir.list()) {
        i++;
        LOGGER.info("Uploading segment {} : {}", i, segmentName);
        File file = new File(_offlineTarDir, segmentName);
        FileUploadUtils.sendSegmentFile("localhost", "8998", segmentName, new FileInputStream(file),
                file.length());
    }

    // Wait for all offline segments to be online
    latch.await();

    _compareStatusFileWriter = getLogWriter();
    _scanRspFileWriter = getScanRspRecordFileWriter();
    _compareStatusFileWriter.write("Start time:" + System.currentTimeMillis() + "\n");
    _compareStatusFileWriter.flush();
    startTimeMs = System.currentTimeMillis();
    LOGGER.info("Setup completed");
}

From source file:com.wavemaker.tools.apidocs.tools.spring.SpringSwaggerParserTest.java
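
Each controller class is parsed on a 4-thread pool; shutdown followed by awaitTermination(10, TimeUnit.SECONDS) lets all parser tasks finish before the test method returns.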

@Test
public void testMultiThread() throws InterruptedException {
    ExecutorService service = Executors.newFixedThreadPool(4);
    List<Class<?>> controllerClasses = new ArrayList<>();
    controllerClasses.add(VacationController.class);
    controllerClasses.add(UserController.class);
    controllerClasses.add(DepartmentController.class);

    for (final Class<?> controllerClass : controllerClasses) {
        service.execute(new Runnable() {
            public void run() {
                Swagger swagger;
                try {
                    swagger = runForSingleClass(controllerClass);
                } catch (SwaggerParserException e) {
                    throw new RuntimeException("Exception while parsing class:" + controllerClass.getName(), e);
                }
                Assert.assertNotNull(swagger);
                assertEquals(1, swagger.getTags().size());
                assertEquals(controllerClass.getName(), swagger.getTags().get(0).getFullyQualifiedName());
                try {
                    writeToFile(swagger, "class_" + controllerClass.getSimpleName() + ".json");
                } catch (IOException e) {
                    throw new RuntimeException("Error while writing to file", e);
                }
            }
        });
    }

    service.shutdown();
    service.awaitTermination(10, TimeUnit.SECONDS);
}