Example usage for java.util.concurrent ExecutorService isTerminated

Introduction

On this page you can find example usage of java.util.concurrent ExecutorService.isTerminated, collected from open source projects.

Prototype

boolean isTerminated();

Document

Returns true if all tasks have completed following shut down. Note that isTerminated is never true unless either shutdown or shutdownNow was called first.
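
Before the project examples, here is a minimal sketch of the typical pattern (the class and variable names are illustrative, not taken from any of the projects below): submit work, call shutdown(), then wait for termination with awaitTermination() rather than spinning on isTerminated().

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class IsTerminatedDemo {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        pool.submit(() -> System.out.println("task ran"));
        // isTerminated() can only become true after shutdown() or shutdownNow()
        pool.shutdown();
        while (!pool.isTerminated()) {
            // awaitTermination() blocks with a timeout instead of busy-waiting
            pool.awaitTermination(100, TimeUnit.MILLISECONDS);
        }
        System.out.println("terminated: " + pool.isTerminated()); // prints true
    }
}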

Usage

From source file: Main.java

public void processUsers(int numOfWorkerThreads) {
    ExecutorService threadPool = Executors.newFixedThreadPool(numOfWorkerThreads);
    int chunk = itemsToBeProcessed.length / numOfWorkerThreads;
    int start = 0;
    List<Future> tasks = new ArrayList<Future>();
    for (int i = 0; i < numOfWorkerThreads; i++) {
        tasks.add(threadPool.submit(new WorkerThread(start, start + chunk)));
        start = start + chunk;
    }
    // join all worker threads to main thread
    for (Future f : tasks) {
        try {
            f.get();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
    threadPool.shutdown();
    while (!threadPool.isTerminated()) {
    }
}
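
Note that this example joins every Future with get() before calling shutdown(), so all tasks have already finished when the empty while loop runs; the loop adds no safety and simply spins a CPU core until the pool reports termination. A blocking awaitTermination() loop, as in the sketch above and in the HDFSEventSink example below, is the usual alternative.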

From source file: org.apache.flume.sink.hdfs.HDFSEventSink.java

@Override
public void stop() {
    // do not constrain close() calls with a timeout
    synchronized (sfWritersLock) {
        for (Entry<String, BucketWriter> entry : sfWriters.entrySet()) {
            LOG.info("Closing {}", entry.getKey());

            try {
                entry.getValue().close();
            } catch (Exception ex) {
                LOG.warn("Exception while closing " + entry.getKey() + ". " + "Exception follows.", ex);
                if (ex instanceof InterruptedException) {
                    Thread.currentThread().interrupt();
                }
            }
        }
    }

    // shut down all our thread pools
    ExecutorService[] toShutdown = { callTimeoutPool, timedRollerPool };
    for (ExecutorService execService : toShutdown) {
        execService.shutdown();
        try {
            while (!execService.isTerminated()) {
                execService.awaitTermination(Math.max(defaultCallTimeout, callTimeout), TimeUnit.MILLISECONDS);
            }
        } catch (InterruptedException ex) {
            LOG.warn("shutdown interrupted on " + execService, ex);
        }
    }

    callTimeoutPool = null;
    timedRollerPool = null;

    synchronized (sfWritersLock) {
        sfWriters.clear();
        sfWriters = null;
    }
    sinkCounter.stop();
    super.stop();
}

From source file: org.apache.bookkeeper.common.util.OrderedExecutor.java

/**
 * {@inheritDoc}
 */
@Override
public boolean isTerminated() {
    for (ExecutorService executor : threads) {
        if (!executor.isTerminated()) {
            return false;
        }
    }
    return true;
}
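
This example shows the other side of the API: OrderedExecutor wraps a set of underlying executors, and its overriding isTerminated() reports true only once every one of them has terminated, preserving the interface contract for the composite.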

From source file: net.darkmist.clf.Main.java

private void handleFiles(String fileNames[], int off, int len) {
    DirTraverser traverser;
    Queue<File> files;
    ExecutorService executor;

    // convert fileNames to Files and put them in a Queue
    files = Util.newQueue(Util.getStringToFileConverter(), fileNames, off, len);
    //executor = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());
    executor = MoreExecutors.newCurrentThreadPool();

    traverser = new DirTraverser(files, new ExecutorFileHandler(executor, this));

    // let her rip
    traverser.run();

    // all done traversing... shutdown the executor
    executor.shutdown();
    // and wait for it
    while (!executor.isTerminated()) {
        try {
            executor.awaitTermination(STATUS_TIME, STATUS_UNIT);
        } catch (InterruptedException e) {
            logger.warn("Ignoring InterruptedException until thread pool executor stops", e);
        }
        if (logger.isDebugEnabled() && executor instanceof ThreadPoolExecutor) {
            ThreadPoolExecutor pool = (ThreadPoolExecutor) executor;
            logger.debug("ThreadPool size=" + pool.getPoolSize() + " active=" + pool.getActiveCount()
                    + " queue=" + pool.getQueue().size());
        }
    }
    executor = null;
    logger.debug("handleFiles done...");
}

From source file: net.mindengine.galen.GalenMain.java

private void runSuitesInThreads(List<GalenSuite> suites, int parallelSuites, final CompleteListener listener) {
    ExecutorService executor = Executors.newFixedThreadPool(parallelSuites);
    for (final GalenSuite suite : suites) {
        Runnable thread = new Runnable() {
            @Override
            public void run() {
                GalenSuiteRunner suiteRunner = new GalenSuiteRunner();
                suiteRunner.setSuiteListener(listener);
                suiteRunner.setValidationListener(listener);
                suiteRunner.runSuite(suite);
            }
        };

        executor.execute(thread);
    }
    executor.shutdown();
    while (!executor.isTerminated()) {
    }
}

From source file: org.jumpmind.metl.core.runtime.flow.StepRuntime.java

private void shutdownThreads(boolean waitForShutdown) {
    if (this.componentRuntimeExecutor instanceof ExecutorService) {
        try {
            ExecutorService service = (ExecutorService) this.componentRuntimeExecutor;
            service.shutdown();
            while (waitForShutdown && !service.isTerminated()) {
                service.awaitTermination(500, TimeUnit.MILLISECONDS);
            }
        } catch (Exception e) {
            recordError(1, e);
        }
    }
}

From source file: com.linkedin.pinot.controller.helix.PinotResourceManagerTest.java

/**
 * Creates 5 threads that concurrently try to add 20 segments each, and asserts that we have
 * 100 segments in the end. Then launches 5 threads again that concurrently try to delete all segments,
 * and makes sure that we have zero segments left in the end.
 * @throws Exception
 */
@Test
public void testConcurrentAddingAndDeletingSegments() throws Exception {
    ExecutorService addSegmentExecutor = Executors.newFixedThreadPool(5);

    for (int i = 0; i < 5; ++i) {
        addSegmentExecutor.execute(new Runnable() {

            @Override
            public void run() {
                for (int i = 0; i < 20; ++i) {
                    addOneSegment(TABLE_NAME);
                    try {
                        Thread.sleep(1000);
                    } catch (InterruptedException e) {
                        Assert.assertFalse(true, "Exception caught during sleep.");
                    }
                }
            }
        });
    }
    addSegmentExecutor.shutdown();
    while (!addSegmentExecutor.isTerminated()) {
    }

    final String offlineTableName = TableNameBuilder.OFFLINE_TABLE_NAME_BUILDER.forTable(TABLE_NAME);
    IdealState idealState = _helixAdmin.getResourceIdealState(HELIX_CLUSTER_NAME, offlineTableName);
    Assert.assertEquals(idealState.getPartitionSet().size(), 100);

    ExecutorService deleteSegmentExecutor = Executors.newFixedThreadPool(5);
    for (final String segment : idealState.getPartitionSet()) {
        deleteSegmentExecutor.execute(new Runnable() {

            @Override
            public void run() {
                deleteOneSegment(offlineTableName, segment);
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException e) {
                    Assert.assertFalse(true, "Exception caught during sleep.");
                }
            }
        });
    }
    deleteSegmentExecutor.shutdown();
    while (!deleteSegmentExecutor.isTerminated()) {
    }

    idealState = _helixAdmin.getResourceIdealState(HELIX_CLUSTER_NAME, offlineTableName);
    Assert.assertEquals(idealState.getPartitionSet().size(), 0);
}

From source file: org.sonarsource.sonarlint.core.mediumtest.StandaloneIssueMediumTest.java

@Test
public void concurrentAnalysis() throws Throwable {
    final ClientInputFile inputFile = prepareInputFile("Foo.java",
            "public class Foo {\n" + "  public void foo() {\n" + "    int x;\n"
                    + "    System.out.println(\"Foo\");\n" + "    System.out.println(\"Foo\"); //NOSONAR\n"
                    + "  }\n" + "}",
            false);

    final Path workDir = temp.newFolder().toPath();

    int parallelExecutions = 4;

    ExecutorService executor = Executors.newFixedThreadPool(parallelExecutions);

    List<Future<?>> results = new ArrayList<>();
    for (int i = 0; i < parallelExecutions; i++) {

        Runnable worker = new Runnable() {
            @Override
            public void run() {
                sonarlint.analyze(new StandaloneAnalysisConfiguration(baseDir.toPath(), workDir,
                        Arrays.asList(inputFile), ImmutableMap.of()), issue -> {
                        });
            }
        };
        results.add(executor.submit(worker));
    }
    executor.shutdown();

    while (!executor.isTerminated()) {
    }

    for (Future<?> future : results) {
        try {
            future.get();
        } catch (ExecutionException e) {
            throw e.getCause();
        }
    }

}

From source file: com.cloud.hypervisor.kvm.resource.wrapper.LibvirtMigrateCommandWrapper.java

@Override
public Answer execute(final MigrateCommand command, final LibvirtComputingResource libvirtComputingResource) {
    final String vmName = command.getVmName();

    String result = null;

    List<InterfaceDef> ifaces = null;
    List<DiskDef> disks;

    Domain dm = null;
    Connect dconn = null;
    Domain destDomain = null;
    Connect conn = null;
    String xmlDesc = null;
    List<Ternary<String, Boolean, String>> vmsnapshots = null;

    try {
        final LibvirtUtilitiesHelper libvirtUtilitiesHelper = libvirtComputingResource
                .getLibvirtUtilitiesHelper();

        conn = libvirtUtilitiesHelper.getConnectionByVmName(vmName);
        ifaces = libvirtComputingResource.getInterfaces(conn, vmName);
        disks = libvirtComputingResource.getDisks(conn, vmName);
        dm = conn.domainLookupByName(vmName);
        /*
        We replace the private IP address with the address of the destination host.
        This is because the VNC listens on the private IP address of the hypervisor,
        but that address is of course different on the target host.
                
        MigrateCommand.getDestinationIp() returns the private IP address of the target
        hypervisor. So it's safe to use.
                
        The Domain.migrate method from libvirt supports passing a different XML
        description for the instance to be used on the target host.
                
        This is supported by libvirt-java from version 0.50.0
                
        CVE-2015-3252: Get XML with sensitive information suitable for migration by using
                       VIR_DOMAIN_XML_MIGRATABLE flag (value = 8)
                       https://libvirt.org/html/libvirt-libvirt-domain.html#virDomainXMLFlags
                
                       Use VIR_DOMAIN_XML_SECURE (value = 1) prior to v1.0.0.
         */
        final int xmlFlag = conn.getLibVirVersion() >= 1000000 ? 8 : 1; // 1000000 equals v1.0.0

        final String target = command.getDestinationIp();
        xmlDesc = dm.getXMLDesc(xmlFlag);
        xmlDesc = replaceIpForVNCInDescFile(xmlDesc, target);

        // delete the metadata of vm snapshots before migration
        vmsnapshots = libvirtComputingResource.cleanVMSnapshotMetadata(dm);

        Map<String, MigrateCommand.MigrateDiskInfo> mapMigrateStorage = command.getMigrateStorage();
        // migrateStorage is declared as final because the replaceStorage method may mutate mapMigrateStorage, but
        // migrateStorage's value should always only be associated with the initial state of mapMigrateStorage.
        final boolean migrateStorage = MapUtils.isNotEmpty(mapMigrateStorage);

        if (migrateStorage) {
            xmlDesc = replaceStorage(xmlDesc, mapMigrateStorage);
        }

        dconn = libvirtUtilitiesHelper
                .retrieveQemuConnection("qemu+tcp://" + command.getDestinationIp() + "/system");

        //run migration in thread so we can monitor it
        s_logger.info("Live migration of instance " + vmName + " initiated");
        final ExecutorService executor = Executors.newFixedThreadPool(1);
        final Callable<Domain> worker = new MigrateKVMAsync(libvirtComputingResource, dm, dconn, xmlDesc,
                migrateStorage, command.isAutoConvergence(), vmName, command.getDestinationIp());
        final Future<Domain> migrateThread = executor.submit(worker);
        executor.shutdown();
        long sleeptime = 0;
        while (!executor.isTerminated()) {
            Thread.sleep(100);
            sleeptime += 100;
            if (sleeptime == 1000) { // wait 1s before attempting to set downtime on migration, since I don't know of a VIR_DOMAIN_MIGRATING state
                final int migrateDowntime = libvirtComputingResource.getMigrateDowntime();
                if (migrateDowntime > 0) {
                    try {
                        final int setDowntime = dm.migrateSetMaxDowntime(migrateDowntime);
                        if (setDowntime == 0) {
                            s_logger.debug("Set max downtime for migration of " + vmName + " to "
                                    + String.valueOf(migrateDowntime) + "ms");
                        }
                    } catch (final LibvirtException e) {
                        s_logger.debug(
                                "Failed to set max downtime for migration, perhaps migration completed? Error: "
                                        + e.getMessage());
                    }
                }
            }
            if (sleeptime % 1000 == 0) {
                s_logger.info(
                        "Waiting for migration of " + vmName + " to complete, waited " + sleeptime + "ms");
            }

            // pause vm if we meet the vm.migrate.pauseafter threshold and not already paused
            final int migratePauseAfter = libvirtComputingResource.getMigratePauseAfter();
            if (migratePauseAfter > 0 && sleeptime > migratePauseAfter) {
                DomainState state = null;
                try {
                    state = dm.getInfo().state;
                } catch (final LibvirtException e) {
                    s_logger.info("Couldn't get VM domain state after " + sleeptime + "ms: " + e.getMessage());
                }
                if (state != null && state == DomainState.VIR_DOMAIN_RUNNING) {
                    try {
                        s_logger.info(
                                "Pausing VM " + vmName + " due to property vm.migrate.pauseafter setting to "
                                        + migratePauseAfter + "ms to complete migration");
                        dm.suspend();
                    } catch (final LibvirtException e) {
                        // pause could be racy if it attempts to pause right when vm is finished, simply warn
                        s_logger.info("Failed to pause vm " + vmName + " : " + e.getMessage());
                    }
                }
            }
        }
        s_logger.info("Migration thread for " + vmName + " is done");

        destDomain = migrateThread.get(10, TimeUnit.SECONDS);

        if (destDomain != null) {
            for (final DiskDef disk : disks) {
                libvirtComputingResource.cleanupDisk(disk);
            }
        }

    } catch (final LibvirtException e) {
        s_logger.debug("Can't migrate domain: " + e.getMessage());
        result = e.getMessage();
    } catch (final InterruptedException e) {
        s_logger.debug("Interrupted while migrating domain: " + e.getMessage());
        result = e.getMessage();
    } catch (final ExecutionException e) {
        s_logger.debug("Failed to execute while migrating domain: " + e.getMessage());
        result = e.getMessage();
    } catch (final TimeoutException e) {
        s_logger.debug("Timed out while migrating domain: " + e.getMessage());
        result = e.getMessage();
    } catch (final IOException e) {
        s_logger.debug("IOException: " + e.getMessage());
        result = e.getMessage();
    } catch (final ParserConfigurationException e) {
        s_logger.debug("ParserConfigurationException: " + e.getMessage());
        result = e.getMessage();
    } catch (final SAXException e) {
        s_logger.debug("SAXException: " + e.getMessage());
        result = e.getMessage();
    } catch (final TransformerConfigurationException e) {
        s_logger.debug("TransformerConfigurationException: " + e.getMessage());
        result = e.getMessage();
    } catch (final TransformerException e) {
        s_logger.debug("TransformerException: " + e.getMessage());
        result = e.getMessage();
    } finally {
        try {
            if (dm != null && result != null) {
                // restore vm snapshots in case of failed migration
                if (vmsnapshots != null) {
                    libvirtComputingResource.restoreVMSnapshotMetadata(dm, vmName, vmsnapshots);
                }
            }
            if (dm != null) {
                if (dm.isPersistent() == 1) {
                    dm.undefine();
                }
                dm.free();
            }
            if (dconn != null) {
                dconn.close();
            }
            if (destDomain != null) {
                destDomain.free();
            }
        } catch (final LibvirtException e) {
            s_logger.trace("Ignoring libvirt error.", e);
        }
    }

    // on successful migration (result == null), clean up network rules and unplug the vifs
    if (result == null) {
        libvirtComputingResource.destroyNetworkRulesForVM(conn, vmName);
        for (final InterfaceDef iface : ifaces) {
            // We don't know which "traffic type" is associated with
            // each interface at this point, so inform all vif drivers
            final List<VifDriver> allVifDrivers = libvirtComputingResource.getAllVifDrivers();
            for (final VifDriver vifDriver : allVifDrivers) {
                vifDriver.unplug(iface);
            }
        }
    }

    return new MigrateAnswer(command, result == null, result, null);
}
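
In this example isTerminated() is not a shutdown check but the condition of a monitoring loop: while the single-task migration pool is still running, the loop wakes every 100 ms to log progress, sets the maximum migration downtime after the first second, and pauses the VM once the vm.migrate.pauseafter threshold is exceeded.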

From source file: it.wami.map.mongodeploy.OsmSaxHandler.java

@Override
public void endElement(String uri, String localName, String qName) throws SAXException {
    super.endElement(uri, localName, qName);

    if (NODE.equals(qName)) {
        save(entry, COLL_NODES);
    }
    if (WAY.equals(qName)) {
        if (!nodesQueue.isEmpty()) {
            System.out.println("remaining nodes: " + nodesQueue.size());
            saveEntry(nodesQueue, COLL_NODES);
        }
        if (options.isWayGeometry()) {
            populateWayGeo((Way) entry);
        } else {
            standardWay((Way) entry);
        }
    }
    if (RELATION.equals(qName)) {
        if (!waysRunnables.isEmpty()) {
            System.out.println("remaining ways: " + waysRunnables.size());
            int cores = Runtime.getRuntime().availableProcessors();
            ExecutorService executorService = Executors.newFixedThreadPool(cores);
            for (Runnable currentRunnable : waysRunnables) {
                executorService.execute(currentRunnable);
            }
            executorService.shutdown();
            while (!executorService.isTerminated()) {
            }
            waysRunnables.clear();
            saveEntry(waysQueue, COLL_WAYS);
        }
        if (!waysQueue.isEmpty()) {
            System.out.println("remaining ways: " + waysRunnables.size());
            saveEntry(waysQueue, COLL_WAYS);
        }
        if (options.isRelationGeometry()) {
            populateRelation((Relation) entry);
        } else {
            save(entry, COLL_RELATIONS);
        }
    }
}