Example usage for java.util.concurrent ScheduledExecutorService shutdownNow

Introduction

On this page you can find example usage of java.util.concurrent ScheduledExecutorService shutdownNow.

Prototype

List<Runnable> shutdownNow();

Document

Attempts to stop all actively executing tasks, halts the processing of waiting tasks, and returns a list of the tasks that were awaiting execution.
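
For orientation, shutdownNow() usually appears as the forced second phase of a two-phase shutdown. Below is a minimal sketch of that idiom, adapted from the pattern in the ExecutorService Javadoc; the method name and the 10-second timeouts here are illustrative:

void shutdownAndAwaitTermination(ScheduledExecutorService pool) {
    pool.shutdown(); // stop accepting new tasks
    try {
        // give in-flight tasks a chance to finish cleanly
        if (!pool.awaitTermination(10, TimeUnit.SECONDS)) {
            pool.shutdownNow(); // interrupt running tasks, drain the queue
            if (!pool.awaitTermination(10, TimeUnit.SECONDS)) {
                System.err.println("Pool did not terminate");
            }
        }
    } catch (InterruptedException e) {
        pool.shutdownNow(); // (re-)cancel if the current thread was interrupted
        Thread.currentThread().interrupt(); // preserve the interrupt status
    }
}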

Usage

From source file: org.apache.jackrabbit.core.RepositoryImpl.java

/**
 * Protected method that performs the actual shutdown after the shutdown
 * lock has been acquired by the {@link #shutdown()} method.
 */
protected synchronized void doShutdown() {
    log.info("Shutting down repository...");

    // stop optional cluster node
    ClusterNode clusterNode = context.getClusterNode();
    if (clusterNode != null) {
        clusterNode.stop();
    }

    if (securityMgr != null) {
        securityMgr.close();
    }

    // close active user sessions
    // (copy sessions to array to avoid ConcurrentModificationException;
    // manually copy entries rather than calling ReferenceMap#toArray() in
    // order to work around  http://issues.apache.org/bugzilla/show_bug.cgi?id=25551)
    List<Session> sa;
    synchronized (activeSessions) {
        sa = new ArrayList<Session>(activeSessions.size());
        for (Session session : activeSessions.values()) {
            sa.add(session);
        }
    }
    for (Session session : sa) {
        if (session != null) {
            session.logout();
        }
    }

    // shutdown system search manager if there is one
    if (systemSearchMgr != null) {
        systemSearchMgr.close();
    }

    // shut down workspaces
    synchronized (wspInfos) {
        for (WorkspaceInfo wspInfo : wspInfos.values()) {
            wspInfo.dispose();
        }
    }

    try {
        InternalVersionManager m = context.getInternalVersionManager();
        if (m != null) {
            m.close();
        }
    } catch (Exception e) {
        log.error("Error while closing Version Manager.", e);
    }

    repDescriptors.clear();

    DataStore dataStore = context.getDataStore();
    if (dataStore != null) {
        try {
            // close the datastore
            dataStore.close();
        } catch (DataStoreException e) {
            log.error("error while closing datastore", e);
        }
    }

    try {
        // close repository file system
        context.getFileSystem().close();
    } catch (FileSystemException e) {
        log.error("error while closing repository file system", e);
    }

    try {
        nodeIdFactory.close();
    } catch (RepositoryException e) {
        log.error("error while closing node id factory", e);
    }

    // make sure this instance is not used anymore
    disposed = true;

    // wake up threads waiting on this instance's monitor (e.g. workspace janitor)
    notifyAll();

    // Shut down the executor service
    ScheduledExecutorService executor = context.getExecutor();
    executor.shutdown();
    try {
        // Wait for all remaining background threads to terminate
        if (!executor.awaitTermination(10, TimeUnit.SECONDS)) {
            log.warn("Attempting to forcibly shutdown runaway threads");
            executor.shutdownNow();
        }
    } catch (InterruptedException e) {
        log.warn("Interrupted while waiting for background threads", e);
    }

    repConfig.getConnectionFactory().close();

    // finally release repository lock
    if (repLock != null) {
        try {
            repLock.release();
        } catch (RepositoryException e) {
            log.error("failed to release the repository lock", e);
        }
    }

    log.info("Repository has been shutdown");
}
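
One detail worth noting in the example above: the catch block logs the InterruptedException but neither forces the shutdown nor restores the thread's interrupt status. A hedged variant of that block that does both:

} catch (InterruptedException e) {
    log.warn("Interrupted while waiting for background threads", e);
    executor.shutdownNow();             // still force the shutdown, best effort
    Thread.currentThread().interrupt(); // restore the interrupt status
}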

From source file: org.openbmp.db_rest.resources.Orr.java

/**
 * Get IGP RIB with merged BGP RIB for given RouterId
 *
 * Merged BGP RIB will contain duplicate entries for selected BGP paths based
 *     on optimized next-hop selection and RR selection. For example:
 *
 *     prefix 10.0.0.0/8 is equal in attributes but has two possible next-hops
 *                  2.2.2.2 and 4.4.4.4, resulting tie breaker is IGP metric.
 *
 *        Local routerId IGP has metric 20 to 2.2.2.2 and metric 31 to 4.4.4.4
 *        RR IGP has metric 10 to 4.4.4.4 and 31 to 2.2.2.2.
 *
 *        Merged table would contain:
 *          10.0.0.0/8 via 2.2.2.2 metric 20 (metric 31 on rr) marked as preferred (orr selected)
 *          10.0.0.0/8 via 4.4.4.4 metric 31 (metric 10 on rr) marked as not preferred (rr selected w/o orr)
 *
 * NOTE:  The below method assumes (thus requires) that the bgp peer router
 *        advertising the link-state data is the route-reflector.  In other words,
 *        peerHashId (bgp peer hash) learned from bmp router_hash_id is the
 *        route-reflector.
 *
 *        We can change this to allow selection of BGP peers (one or more bmp routers)
 *        for the BGP RIB merge, but that requires more path/query params and is not needed right now.
 *
 *  @param peerHashId       Peer Hash ID of the BGP peer advertising link state information
 *  @param protocol         Either 'ospf' or 'isis'
 *  @param routerId         IPv4 or IPv6 print form router ID (use IPv4/IPv6 rid for ISIS)
 *  @param where            Advanced WHERE clause to filter the BGP merged prefixes
 */
@GET
@Path("/peer/{peerHashId}/{protocol}/{routerId}")
@Produces("application/json")
public Response getLsOspfIGP(@PathParam("peerHashId") String peerHashId, @PathParam("protocol") String protocol,
        @PathParam("routerId") String routerId, @QueryParam("where") String where) {

    long startTime = System.currentTimeMillis();

    ScheduledExecutorService thrPool = Executors.newScheduledThreadPool(2);
    queryThread[] thrs = new queryThread[2];

    // Get IGP for requested router
    thrs[0] = new queryThread(this);
    thrs[0].setArgs(new String[] { peerHashId, routerId, protocol, "30" });
    thrPool.schedule(thrs[0], 0, TimeUnit.MILLISECONDS);

    // Get the router's local router id
    // TODO: Change to support better identification of route reflector
    Map<String, List<DbColumnDef>> rrMap = getPeerRouterId(peerHashId);

    String rr_routerId = null;
    if (rrMap.size() > 0)
        rr_routerId = rrMap.entrySet().iterator().next().getValue().get(0).getValue();

    if (rr_routerId == null) {
        System.out.println("Unable to get the router's routerID by peer hash " + peerHashId);
        return RestResponse.okWithBody("{}");
    }

    // Get the RR IGP
    thrs[1] = new queryThread(this);
    thrs[1].setArgs(new String[] { peerHashId, rr_routerId, protocol, "120" });
    thrPool.schedule(thrs[1], 0, TimeUnit.MILLISECONDS);

    // Wait for IGP query and store the results
    waitForThread(thrs[0]);
    Map<String, List<DbColumnDef>> igpMap = thrs[0].getResults();

    if (igpMap.size() <= 0) {
        return RestResponse.okWithBody("{}");
    }

    // Get the BGP RIB from RR router
    List<DbColumnDef> row = igpMap.entrySet().iterator().next().getValue();
    String routerHashId = row.get(row.size() - 1).getValue();

    String where_str = "router_hash_id = '" + routerHashId + "'";

    if (where != null)
        where_str += " and " + where;

    StringBuilder query = new StringBuilder();
    query.append("SELECT Prefix as prefix,PrefixLen as prefix_len,LocalPref,ASPath_Count,Origin,MED,NH\n");
    query.append("     FROM v_routes WHERE ");
    query.append(where_str);
    query.append(" ORDER BY prefix_bin,PrefixLen LIMIT 1000\n");

    Map<String, List<DbColumnDef>> bgpMap = DbUtils.select_DbToMap(mysql_ds, query.toString());

    // Wait for RR IGP query and store the results
    waitForThread(thrs[1]);
    Map<String, List<DbColumnDef>> rr_igpMap = thrs[1].getResults();

    //long queryTime = System.currentTimeMillis() - startTime;

    thrPool.shutdownNow();

    /*
     * Merge BGP RIB into IGP
     */

    Map<String, List<DbColumnDef>> mergeMap = new HashMap<String, List<DbColumnDef>>();
    mergeMap.putAll(igpMap);

    mergeMap.putAll(mergeIgpWithBgp(igpMap, bgpMap, null, false));

    mergeMap.putAll(mergeIgpWithBgp(rr_igpMap, bgpMap, igpMap, true));

    long queryTime = System.currentTimeMillis() - startTime;

    return RestResponse.okWithBody(DbUtils.DbMapToJson("orr", mergeMap, queryTime));
}
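
Like the other examples on this page, the method above discards the List<Runnable> that shutdownNow() returns. A minimal sketch of inspecting it instead (the log message is illustrative):

List<Runnable> neverStarted = thrPool.shutdownNow();
if (!neverStarted.isEmpty()) {
    // these tasks were still queued and will never run
    System.err.println(neverStarted.size() + " scheduled tasks were cancelled before starting");
}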

From source file: org.apache.hadoop.hive.serde2.objectinspector.TestReflectionObjectInspectors.java

public void testObjectInspectorThreadSafety() throws InterruptedException {
    final int workerCount = 5; // 5 workers to run getReflectionObjectInspector concurrently
    final ScheduledExecutorService executorService = Executors.newScheduledThreadPool(workerCount);
    final MutableObject exception = new MutableObject();
    Thread runner = new Thread(new Runnable() {
        @Override
        @SuppressWarnings("unchecked")
        public void run() {
            Future<ObjectInspector>[] results = (Future<ObjectInspector>[]) new Future[workerCount];
            ObjectPair<Type, ObjectInspectorFactory.ObjectInspectorOptions>[] types = (ObjectPair<Type, ObjectInspectorFactory.ObjectInspectorOptions>[]) new ObjectPair[] {
                    new ObjectPair<Type, ObjectInspectorFactory.ObjectInspectorOptions>(Complex.class,
                            ObjectInspectorFactory.ObjectInspectorOptions.THRIFT),
                    new ObjectPair<Type, ObjectInspectorFactory.ObjectInspectorOptions>(MyStruct.class,
                            ObjectInspectorFactory.ObjectInspectorOptions.JAVA), };
            try {
                for (int i = 0; i < 20; i++) { // repeat 20 times
                    for (final ObjectPair<Type, ObjectInspectorFactory.ObjectInspectorOptions> t : types) {
                        ObjectInspectorFactory.objectInspectorCache.clear();
                        for (int k = 0; k < workerCount; k++) {
                            results[k] = executorService.schedule(new Callable<ObjectInspector>() {
                                @Override
                                public ObjectInspector call() throws Exception {
                                    return ObjectInspectorFactory.getReflectionObjectInspector(t.getFirst(),
                                            t.getSecond());
                                }
                            }, 50, TimeUnit.MILLISECONDS);
                        }
                        ObjectInspector oi = results[0].get();
                        for (int k = 1; k < workerCount; k++) {
                            assertEquals(oi, results[k].get());
                        }
                    }
                }
            } catch (Throwable e) {
                exception.setValue(e);
            }
        }
    });
    try {
        runner.start();
        long endTime = System.currentTimeMillis() + 300000; // timeout in 5 minutes
        while (runner.isAlive()) {
            if (System.currentTimeMillis() > endTime) {
                runner.interrupt(); // Interrupt the runner thread
                fail("Timed out waiting for the runner to finish");
            }
            runner.join(10000);
        }
        if (exception.getValue() != null) {
            fail("Got exception: " + exception.getValue());
        }
    } finally {
        executorService.shutdownNow();
    }
}
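
The test above guarantees cleanup by calling shutdownNow() in a finally block, so the worker threads are released whether the assertions pass or fail. Condensed, the idiom is:

ScheduledExecutorService pool = Executors.newScheduledThreadPool(workerCount);
try {
    // schedule work and assert on the results
} finally {
    pool.shutdownNow(); // always release the worker threads, pass or fail
}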

From source file: org.apache.ambari.server.bootstrap.BSRunner.java

@Override
public void run() {

    if (sshHostInfo.getSshKey() == null || sshHostInfo.getSshKey().equals("")) {
        beforeBootStrap(sshHostInfo);
    }

    String hostString = createHostString(sshHostInfo.getHosts());
    String user = sshHostInfo.getUser();
    String userRunAs = sshHostInfo.getUserRunAs();
    if (user == null || user.isEmpty()) {
        user = DEFAULT_USER;
    }
    String[] command = new String[12];
    BSStat stat = BSStat.RUNNING;
    String scriptlog = "";
    try {
        createRunDir();
        if (LOG.isDebugEnabled()) {
            // FIXME needs to be removed later
            // security hole
            LOG.debug("Using ssh key=\"" + sshHostInfo.getSshKey() + "\"");
        }

        String password = sshHostInfo.getPassword();
        if (password != null && !password.isEmpty()) {
            this.passwordFile = new File(this.requestIdDir, "host_pass");
            // TODO : line separator should be changed
            // if we are going to support multi platform server-agent solution
            String lineSeparator = System.getProperty("line.separator");
            password = password + lineSeparator;
            writePasswordFile(password);
        }

        writeSshKeyFile(sshHostInfo.getSshKey());
        /* Running command:
         * script hostlist bsdir user sshkeyfile
         */
        command[0] = this.bsScript;
        command[1] = hostString;
        command[2] = this.requestIdDir.toString();
        command[3] = user;
        command[4] = this.sshKeyFile.toString();
        command[5] = this.agentSetupScript.toString();
        command[6] = this.ambariHostname;
        command[7] = this.clusterOsFamily;
        command[8] = this.projectVersion;
        command[9] = this.serverPort + "";
        command[10] = userRunAs;
        command[11] = (this.passwordFile == null) ? "null" : this.passwordFile.toString();
        LOG.info("Host= " + hostString + " bs=" + this.bsScript + " requestDir=" + requestIdDir + " user="
                + user + " keyfile=" + this.sshKeyFile + " passwordFile " + this.passwordFile + " server="
                + this.ambariHostname + " version=" + projectVersion + " serverPort=" + this.serverPort
                + " userRunAs=" + userRunAs);

        String[] env = new String[] { "AMBARI_PASSPHRASE=" + agentSetupPassword };
        if (this.verbose)
            env = new String[] { env[0], " BS_VERBOSE=\"-vvv\" " };

        if (LOG.isDebugEnabled()) {
            LOG.debug(Arrays.toString(command));
        }

        String bootStrapOutputFilePath = requestIdDir + File.separator + "bootstrap.out";
        String bootStrapErrorFilePath = requestIdDir + File.separator + "bootstrap.err";

        Process process = Runtime.getRuntime().exec(command, env);

        PrintWriter stdOutWriter = null;
        PrintWriter stdErrWriter = null;

        try {
            stdOutWriter = new PrintWriter(bootStrapOutputFilePath);
            stdErrWriter = new PrintWriter(bootStrapErrorFilePath);
            IOUtils.copy(process.getInputStream(), stdOutWriter);
            IOUtils.copy(process.getErrorStream(), stdErrWriter);
        } finally {
            if (stdOutWriter != null)
                stdOutWriter.close();

            if (stdErrWriter != null)
                stdErrWriter.close();
        }

        // Startup a scheduled executor service to look through the logs
        ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
        BSStatusCollector statusCollector = new BSStatusCollector();
        ScheduledFuture<?> handle = scheduler.scheduleWithFixedDelay(statusCollector, 0, 10, TimeUnit.SECONDS);
        LOG.info("Kicking off the scheduler for polling on logs in " + this.requestIdDir);
        try {

            LOG.info("Bootstrap output, log=" + bootStrapErrorFilePath + " " + bootStrapOutputFilePath);
            int exitCode = process.waitFor();
            String outMesg = "";
            String errMesg = "";
            try {
                outMesg = FileUtils.readFileToString(new File(bootStrapOutputFilePath));
                errMesg = FileUtils.readFileToString(new File(bootStrapErrorFilePath));
            } catch (IOException io) {
                LOG.info("Error in reading files ", io);
            }
            scriptlog = outMesg + "\n\n" + errMesg;
            LOG.info("Script log Mesg " + scriptlog);
            if (exitCode != 0) {
                stat = BSStat.ERROR;
            } else {
                stat = BSStat.SUCCESS;
            }

            scheduler.schedule(new BSStatusCollector(), 0, TimeUnit.SECONDS);
            long startTime = System.currentTimeMillis();
            while (true) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Waiting for hosts status to be updated");
                }
                boolean pendingHosts = false;
                BootStrapStatus tmpStatus = bsImpl.getStatus(requestId);
                List<BSHostStatus> hostStatusList = tmpStatus.getHostsStatus();
                if (hostStatusList != null) {
                    for (BSHostStatus status : hostStatusList) {
                        if (status.getStatus().equals("RUNNING")) {
                            pendingHosts = true;
                        }
                    }
                } else {
                    //Failed to get host status, waiting for hosts status to be updated
                    pendingHosts = true;
                }
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Whether hosts status yet to be updated, pending=" + pendingHosts);
                }
                if (!pendingHosts) {
                    break;
                }
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException e) {
                    // continue
                }
                long now = System.currentTimeMillis();
                if (now >= (startTime + 15000)) {
                    LOG.warn("Gave up waiting for hosts status to be updated");
                    break;
                }
            }
        } catch (InterruptedException e) {
            throw new IOException(e);
        } finally {
            handle.cancel(true);
            /* schedule a last update */
            scheduler.schedule(new BSStatusCollector(), 0, TimeUnit.SECONDS);
            scheduler.shutdownNow();
            try {
                scheduler.awaitTermination(10, TimeUnit.SECONDS);
            } catch (InterruptedException e) {
                LOG.info("Interrupted while waiting for scheduler");
            }
            process.destroy();
        }
    } catch (IOException io) {
        LOG.info("Error executing bootstrap " + io.getMessage());
        stat = BSStat.ERROR;
    } finally {
        /* get the bstatus */
        BootStrapStatus tmpStatus = bsImpl.getStatus(requestId);
        List<BSHostStatus> hostStatusList = tmpStatus.getHostsStatus();
        if (hostStatusList != null) {
            for (BSHostStatus hostStatus : hostStatusList) {
                if ("FAILED".equals(hostStatus.getStatus())) {
                    stat = BSStat.ERROR;
                    break;
                }
            }
        } else {
            stat = BSStat.ERROR;
        }
        tmpStatus.setLog(scriptlog);
        tmpStatus.setStatus(stat);
        bsImpl.updateStatus(requestId, tmpStatus);
        bsImpl.reset();
        // Remove private ssh key after bootstrap is complete
        try {
            FileUtils.forceDelete(sshKeyFile);
        } catch (IOException io) {
            LOG.warn(io.getMessage());
        }
        if (passwordFile != null) {
            // Remove password file after bootstrap is complete
            try {
                FileUtils.forceDelete(passwordFile);
            } catch (IOException io) {
                LOG.warn(io.getMessage());
            }
        }
        finished();
    }
}
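
A caveat on the finally block above: shutdownNow() halts the processing of waiting tasks, so the "last update" scheduled on the preceding line can be cancelled before it ever runs. A hedged variant that lets the final task complete would escalate only on timeout:

handle.cancel(true);                                              // stop the periodic collector
scheduler.schedule(new BSStatusCollector(), 0, TimeUnit.SECONDS); // one final status sweep
scheduler.shutdown();                                             // let queued tasks finish...
if (!scheduler.awaitTermination(10, TimeUnit.SECONDS)) {
    scheduler.shutdownNow();                                      // ...and force only if they do not
}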

From source file: org.wso2.carbon.registry.eventing.RegistryEventDispatcher.java

public RegistryEventDispatcher() {
    digestQueues = new LinkedHashMap<String, Queue<DigestEntry>>();
    for (String s : new String[] { "h", "d", "w", "f", "m", "y" }) {
        //TODO: Identify Queuing mechanisms.
        digestQueues.put(s, new ConcurrentLinkedQueue<DigestEntry>());
    }
    final ScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor();
    executorService.scheduleAtFixedRate(new Runnable() {
        public void run() {
            GregorianCalendar utc = new GregorianCalendar(SimpleTimeZone.getTimeZone("UTC"));
            Map<String, List<DigestEntry>> digestEntries = new HashMap<String, List<DigestEntry>>();
            try {
                addToDigestEntryQueue(digestEntries, "h");
                if (utc.get(Calendar.HOUR_OF_DAY) == 0) {
                    addToDigestEntryQueue(digestEntries, "d");
                    if (utc.get(Calendar.DAY_OF_WEEK) == 1) {
                        addToDigestEntryQueue(digestEntries, "w");
                        if (utc.get(Calendar.WEEK_OF_YEAR) % 2 != 0) {
                            addToDigestEntryQueue(digestEntries, "f");
                        }
                    }
                    if (utc.get(Calendar.DAY_OF_MONTH) == 1) {
                        addToDigestEntryQueue(digestEntries, "m");
                        if (utc.get(Calendar.DAY_OF_YEAR) == 1) {
                            addToDigestEntryQueue(digestEntries, "y");

                        }
                    }
                }
                for (Map.Entry<String, List<DigestEntry>> e : digestEntries.entrySet()) {
                    List<DigestEntry> value = e.getValue();
                    Collections.sort(value, new Comparator<DigestEntry>() {
                        public int compare(DigestEntry o1, DigestEntry o2) {
                            if (o1.getTime() > o2.getTime()) {
                                return -1;
                            } else if (o1.getTime() < o2.getTime()) {
                                return 1;
                            }
                            return 0;
                        }
                    });
                    StringBuffer buffer = new StringBuffer();
                    for (DigestEntry entry : value) {
                        buffer.append(entry.getMessage()).append("\n\n");
                    }
                    RegistryEvent<String> re = new RegistryEvent<String>(buffer.toString());
                    re.setTopic(RegistryEvent.TOPIC_SEPARATOR + "DigestEvent");
                    DispatchEvent de = new DispatchEvent(re, e.getKey(), true);
                    Subscription subscription = new Subscription();
                    subscription.setTopicName(re.getTopic());
                    publishEvent(de, subscription, e.getKey(), true);
                }
            } catch (RuntimeException ignored) {
                // Eat any runtime exceptions that occurred, we don't care if the message went
                // or not.
            }
        }
    }, System.currentTimeMillis() % (1000 * 60 * 60), 1000 * 60 * 60, TimeUnit.MILLISECONDS);
    try {
        Runtime.getRuntime().addShutdownHook(new Thread() {
            public void run() {
                executorService.shutdownNow();
            }
        });
    } catch (IllegalStateException e) {
        executorService.shutdownNow();
        throw new IllegalStateException(
                "Unable to create registry event dispatcher during " + "shutdown process.");
    }
}
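
The constructor above ties the executor's lifetime to the JVM with a shutdown hook, falling back to an immediate shutdownNow() when the hook cannot be registered. The idiom in isolation (the variable name is illustrative):

final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
// addShutdownHook throws IllegalStateException once JVM shutdown has begun,
// which is the fallback case the constructor above handles.
Runtime.getRuntime().addShutdownHook(new Thread(scheduler::shutdownNow));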

From source file: com.linkedin.d2.balancer.simple.SimpleLoadBalancerTest.java

@Test(groups = { "small", "back-end" })
public void testLoadBalancerSmoke()
        throws URISyntaxException, ServiceUnavailableException, InterruptedException, ExecutionException {
    for (int tryAgain = 0; tryAgain < 1000; ++tryAgain) {
        Map<String, LoadBalancerStrategyFactory<? extends LoadBalancerStrategy>> loadBalancerStrategyFactories = new HashMap<String, LoadBalancerStrategyFactory<? extends LoadBalancerStrategy>>();
        Map<String, TransportClientFactory> clientFactories = new HashMap<String, TransportClientFactory>();
        List<String> prioritizedSchemes = new ArrayList<String>();

        MockStore<ServiceProperties> serviceRegistry = new MockStore<ServiceProperties>();
        MockStore<ClusterProperties> clusterRegistry = new MockStore<ClusterProperties>();
        MockStore<UriProperties> uriRegistry = new MockStore<UriProperties>();

        ScheduledExecutorService executorService = new SynchronousExecutorService();

        //loadBalancerStrategyFactories.put("rr", new RandomLoadBalancerStrategyFactory());
        loadBalancerStrategyFactories.put("degrader", new DegraderLoadBalancerStrategyFactoryV3());
        // PrpcClientFactory();
        clientFactories.put("http", new DoNothingClientFactory()); // new
        // HttpClientFactory();

        SimpleLoadBalancerState state = new SimpleLoadBalancerState(executorService, uriRegistry,
                clusterRegistry, serviceRegistry, clientFactories, loadBalancerStrategyFactories);

        SimpleLoadBalancer loadBalancer = new SimpleLoadBalancer(state, 5, TimeUnit.SECONDS);

        FutureCallback<None> balancerCallback = new FutureCallback<None>();
        loadBalancer.start(balancerCallback);
        balancerCallback.get();

        URI uri1 = URI.create("http://test.qa1.com:1234");
        URI uri2 = URI.create("http://test.qa2.com:2345");
        URI uri3 = URI.create("http://test.qa3.com:6789");

        Map<Integer, PartitionData> partitionData = new HashMap<Integer, PartitionData>(1);
        partitionData.put(DefaultPartitionAccessor.DEFAULT_PARTITION_ID, new PartitionData(1d));
        Map<URI, Map<Integer, PartitionData>> uriData = new HashMap<URI, Map<Integer, PartitionData>>(3);
        uriData.put(uri1, partitionData);
        uriData.put(uri2, partitionData);
        uriData.put(uri3, partitionData);

        prioritizedSchemes.add("http");

        clusterRegistry.put("cluster-1", new ClusterProperties("cluster-1"));

        serviceRegistry.put("foo", new ServiceProperties("foo", "cluster-1", "/foo", Arrays.asList("degrader"),
                Collections.<String, Object>emptyMap(), null, null, prioritizedSchemes, null));
        uriRegistry.put("cluster-1", new UriProperties("cluster-1", uriData));

        URI expectedUri1 = URI.create("http://test.qa1.com:1234/foo");
        URI expectedUri2 = URI.create("http://test.qa2.com:2345/foo");
        URI expectedUri3 = URI.create("http://test.qa3.com:6789/foo");

        Set<URI> expectedUris = new HashSet<URI>();

        expectedUris.add(expectedUri1);
        expectedUris.add(expectedUri2);
        expectedUris.add(expectedUri3);

        for (int i = 0; i < 100; ++i) {
            RewriteClient client = (RewriteClient) loadBalancer.getClient(new URIRequest("d2://foo/52"),
                    new RequestContext());

            assertTrue(expectedUris.contains(client.getUri()));
            assertEquals(client.getUri().getScheme(), "http");
        }

        final CountDownLatch latch = new CountDownLatch(1);
        PropertyEventShutdownCallback callback = new PropertyEventShutdownCallback() {
            @Override
            public void done() {
                latch.countDown();
            }
        };

        state.shutdown(callback);

        if (!latch.await(60, TimeUnit.SECONDS)) {
            fail("unable to shutdown state");
        }

        executorService.shutdownNow();

        assertTrue(executorService.isShutdown(), "ExecutorService should have shut down!");
    }
}

From source file: com.linkedin.d2.balancer.simple.SimpleLoadBalancerTest.java

/**
 * This test simulates dropping requests by playing with OverrideDropRate in config
 *
 */
@Test(groups = { "small", "back-end" })
public void testLoadBalancerDropRate()
        throws ServiceUnavailableException, ExecutionException, InterruptedException {
    final int RETRY = 10;
    for (int tryAgain = 0; tryAgain < RETRY; ++tryAgain) {
        Map<String, LoadBalancerStrategyFactory<? extends LoadBalancerStrategy>> loadBalancerStrategyFactories = new HashMap<String, LoadBalancerStrategyFactory<? extends LoadBalancerStrategy>>();
        Map<String, TransportClientFactory> clientFactories = new HashMap<String, TransportClientFactory>();
        List<String> prioritizedSchemes = new ArrayList<String>();

        MockStore<ServiceProperties> serviceRegistry = new MockStore<ServiceProperties>();
        MockStore<ClusterProperties> clusterRegistry = new MockStore<ClusterProperties>();
        MockStore<UriProperties> uriRegistry = new MockStore<UriProperties>();

        ScheduledExecutorService executorService = new SynchronousExecutorService();

        //loadBalancerStrategyFactories.put("rr", new RandomLoadBalancerStrategyFactory());
        loadBalancerStrategyFactories.put("degrader", new DegraderLoadBalancerStrategyFactoryV3());
        // PrpcClientFactory();
        clientFactories.put("http", new DoNothingClientFactory()); // new
        // HttpClientFactory();

        SimpleLoadBalancerState state = new SimpleLoadBalancerState(executorService, uriRegistry,
                clusterRegistry, serviceRegistry, clientFactories, loadBalancerStrategyFactories);

        SimpleLoadBalancer loadBalancer = new SimpleLoadBalancer(state, 5, TimeUnit.SECONDS);

        FutureCallback<None> balancerCallback = new FutureCallback<None>();
        loadBalancer.start(balancerCallback);
        balancerCallback.get();

        URI uri1 = URI.create("http://test.qa1.com:1234");
        URI uri2 = URI.create("http://test.qa2.com:2345");
        URI uri3 = URI.create("http://test.qa3.com:6789");

        Map<Integer, PartitionData> partitionData = new HashMap<Integer, PartitionData>(1);
        partitionData.put(DefaultPartitionAccessor.DEFAULT_PARTITION_ID, new PartitionData(1d));
        Map<URI, Map<Integer, PartitionData>> uriData = new HashMap<URI, Map<Integer, PartitionData>>(3);
        uriData.put(uri1, partitionData);
        uriData.put(uri2, partitionData);
        uriData.put(uri3, partitionData);

        prioritizedSchemes.add("http");

        clusterRegistry.put("cluster-1", new ClusterProperties("cluster-1"));

        serviceRegistry.put("foo", new ServiceProperties("foo", "cluster-1", "/foo", Arrays.asList("degrader"),
                Collections.<String, Object>emptyMap(), null, null, prioritizedSchemes, null));
        uriRegistry.put("cluster-1", new UriProperties("cluster-1", uriData));

        URI expectedUri1 = URI.create("http://test.qa1.com:1234/foo");
        URI expectedUri2 = URI.create("http://test.qa2.com:2345/foo");
        URI expectedUri3 = URI.create("http://test.qa3.com:6789/foo");

        Set<URI> expectedUris = new HashSet<URI>();

        expectedUris.add(expectedUri1);
        expectedUris.add(expectedUri2);
        expectedUris.add(expectedUri3);
        Random random = new Random();

        for (int i = 0; i < 100; ++i) {
            try {
                RewriteClient client = (RewriteClient) loadBalancer.getClient(new URIRequest("d2://foo/52"),
                        new RequestContext());
                TrackerClient tClient = (TrackerClient) client.getWrappedClient();
                DegraderImpl degrader = (DegraderImpl) tClient
                        .getDegrader(DefaultPartitionAccessor.DEFAULT_PARTITION_ID);
                DegraderImpl.Config cfg = new DegraderImpl.Config(degrader.getConfig());
                // Change DropRate to 0.0 at the rate of 1/3
                cfg.setOverrideDropRate((random.nextInt(2) == 0) ? 1.0 : 0.0);
                degrader.setConfig(cfg);

                assertTrue(expectedUris.contains(client.getUri()));
                assertEquals(client.getUri().getScheme(), "http");
            } catch (ServiceUnavailableException e) {
                assertTrue(e.toString().contains("in a bad state (high latency/high error)"));
            }
        }

        final CountDownLatch latch = new CountDownLatch(1);
        PropertyEventShutdownCallback callback = new PropertyEventShutdownCallback() {
            @Override
            public void done() {
                latch.countDown();
            }
        };

        state.shutdown(callback);

        if (!latch.await(60, TimeUnit.SECONDS)) {
            fail("unable to shutdown state");
        }

        executorService.shutdownNow();

        assertTrue(executorService.isShutdown(), "ExecutorService should have shut down!");
    }
}

From source file: org.hyperledger.fabric.sdk.Channel.java

/**
 * Shutdown the channel with all resources released.
 *
 * @param force force immediate shutdown.
 */

public synchronized void shutdown(boolean force) {

    if (shutdown) {
        return;
    }

    String ltransactionListenerProcessorHandle = transactionListenerProcessorHandle;
    transactionListenerProcessorHandle = null;
    if (null != ltransactionListenerProcessorHandle) {

        try {
            unregisterBlockListener(ltransactionListenerProcessorHandle);
        } catch (Exception e) {
            logger.error(format("Shutting down channel %s transactionListenerProcessorHandle", name), e);
        }
    }

    String lchaincodeEventUpgradeListenerHandle = chaincodeEventUpgradeListenerHandle;
    chaincodeEventUpgradeListenerHandle = null;
    if (null != lchaincodeEventUpgradeListenerHandle) {

        try {
            unregisterChaincodeEventListener(lchaincodeEventUpgradeListenerHandle);
        } catch (Exception e) {
            logger.error(format("Shutting down channel %s chaincodeEventUpgradeListener", name), e);
        }
    }

    initialized = false;
    shutdown = true;

    final ServiceDiscovery lserviceDiscovery = serviceDiscovery;
    serviceDiscovery = null;
    if (null != lserviceDiscovery) {
        lserviceDiscovery.shutdown();
    }

    if (chainCodeListeners != null) {
        chainCodeListeners.clear();

    }

    if (blockListeners != null) {
        blockListeners.clear();
    }

    if (client != null) {
        client.removeChannel(this);
    }

    client = null;

    for (EventHub eh : eventHubs) {

        try {
            eh.shutdown();
        } catch (Exception e) {
            // Best effort.
        }

    }
    eventHubs.clear();
    for (Peer peer : new ArrayList<>(getPeers())) {

        try {
            removePeerInternal(peer);
            peer.shutdown(force);
        } catch (Exception e) {
            // Best effort.
        }
    }
    peers.clear(); // make sure.

    peerEndpointMap.clear();
    ordererEndpointMap.clear();

    //Make sure
    for (Set<Peer> peerRoleSet : peerRoleSetMap.values()) {
        peerRoleSet.clear();
    }

    for (Orderer orderer : getOrderers()) {
        orderer.shutdown(force);
    }

    orderers.clear();

    if (null != eventQueueThread) {
        eventQueueThread.interrupt();
        eventQueueThread = null;
    }
    ScheduledFuture<?> lsweeper = sweeper;
    sweeper = null;

    if (null != lsweeper) {
        lsweeper.cancel(true);
    }

    ScheduledExecutorService lse = sweeperExecutorService;
    sweeperExecutorService = null;
    if (null != lse) {
        lse.shutdownNow();
    }
}

From source file: org.apache.flink.mesos.runtime.clusterframework.MesosApplicationMasterRunner.java

/**
 * The main work method, must run as a privileged action.
 *
 * @return The return code for the Java process.
 */
protected int runPrivileged(Configuration config, Configuration dynamicProperties) {

    ActorSystem actorSystem = null;
    WebMonitor webMonitor = null;
    MesosArtifactServer artifactServer = null;
    ScheduledExecutorService futureExecutor = null;
    ExecutorService ioExecutor = null;
    MesosServices mesosServices = null;

    try {
        // ------- (1) load and parse / validate all configurations -------

        // Note that we use the "appMasterHostname" given by the system, to make sure
        // we use the hostnames consistently throughout akka.
        // for akka "localhost" and "localhost.localdomain" are different actors.
        final String appMasterHostname = InetAddress.getLocalHost().getHostName();

        // Mesos configuration
        final MesosConfiguration mesosConfig = createMesosConfig(config, appMasterHostname);

        // JM configuration
        int numberProcessors = Hardware.getNumberCPUCores();

        futureExecutor = Executors.newScheduledThreadPool(numberProcessors,
                new ExecutorThreadFactory("mesos-jobmanager-future"));

        ioExecutor = Executors.newFixedThreadPool(numberProcessors,
                new ExecutorThreadFactory("mesos-jobmanager-io"));

        mesosServices = MesosServicesUtils.createMesosServices(config);

        // TM configuration
        final MesosTaskManagerParameters taskManagerParameters = MesosTaskManagerParameters.create(config);

        LOG.info("TaskManagers will be created with {} task slots",
                taskManagerParameters.containeredParameters().numSlots());
        LOG.info(
                "TaskManagers will be started with container size {} MB, JVM heap size {} MB, "
                        + "JVM direct memory limit {} MB, {} cpus",
                taskManagerParameters.containeredParameters().taskManagerTotalMemoryMB(),
                taskManagerParameters.containeredParameters().taskManagerHeapSizeMB(),
                taskManagerParameters.containeredParameters().taskManagerDirectMemoryLimitMB(),
                taskManagerParameters.cpus());

        // JM endpoint, which should be explicitly configured based on acquired net resources
        final int listeningPort = config.getInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY,
                ConfigConstants.DEFAULT_JOB_MANAGER_IPC_PORT);
        checkState(listeningPort >= 0 && listeningPort <= 65536, "Config parameter \""
                + ConfigConstants.JOB_MANAGER_IPC_PORT_KEY + "\" is invalid, it must be between 0 and 65536");

        // ----------------- (2) start the actor system -------------------

        // try to start the actor system, JobManager and JobManager actor system
        // using the configured address and ports
        actorSystem = BootstrapTools.startActorSystem(config, appMasterHostname, listeningPort, LOG);

        Address address = AkkaUtils.getAddress(actorSystem);
        final String akkaHostname = address.host().get();
        final int akkaPort = (Integer) address.port().get();

        LOG.info("Actor system bound to hostname {}.", akkaHostname);

        // try to start the artifact server
        LOG.debug("Starting Artifact Server");
        final int artifactServerPort = config.getInteger(ConfigConstants.MESOS_ARTIFACT_SERVER_PORT_KEY,
                ConfigConstants.DEFAULT_MESOS_ARTIFACT_SERVER_PORT);
        final String artifactServerPrefix = UUID.randomUUID().toString();
        artifactServer = new MesosArtifactServer(artifactServerPrefix, akkaHostname, artifactServerPort,
                config);

        // ----------------- (3) Generate the configuration for the TaskManagers -------------------

        // generate a container spec which conveys the artifacts/vars needed to launch a TM
        ContainerSpecification taskManagerContainerSpec = new ContainerSpecification();

        // propagate the AM dynamic configuration to the TM
        taskManagerContainerSpec.getDynamicConfiguration().addAll(dynamicProperties);

        // propagate newly-generated configuration elements
        final Configuration taskManagerConfig = BootstrapTools.generateTaskManagerConfiguration(
                new Configuration(), akkaHostname, akkaPort,
                taskManagerParameters.containeredParameters().numSlots(), TASKMANAGER_REGISTRATION_TIMEOUT);
        taskManagerContainerSpec.getDynamicConfiguration().addAll(taskManagerConfig);

        // apply the overlays
        applyOverlays(config, taskManagerContainerSpec);

        // configure the artifact server to serve the specified artifacts
        configureArtifactServer(artifactServer, taskManagerContainerSpec);

        // ----------------- (4) start the actors -------------------

        // 1) JobManager & Archive (in non-HA case, the leader service takes this)
        // 2) Web Monitor (we need its port to register)
        // 3) Resource Master for Mesos
        // 4) Process reapers for the JobManager and Resource Master

        // 1: the JobManager
        LOG.debug("Starting JobManager actor");

        // we start the JobManager with its standard name
        ActorRef jobManager = JobManager.startJobManagerActors(config, actorSystem, futureExecutor, ioExecutor,
                new scala.Some<>(JobManager.JOB_MANAGER_NAME()), scala.Option.<String>empty(),
                getJobManagerClass(), getArchivistClass())._1();

        // 2: the web monitor
        LOG.debug("Starting Web Frontend");

        webMonitor = BootstrapTools.startWebMonitorIfConfigured(config, actorSystem, jobManager, LOG);
        if (webMonitor != null) {
            final URL webMonitorURL = new URL("http", appMasterHostname, webMonitor.getServerPort(), "/");
            mesosConfig.frameworkInfo().setWebuiUrl(webMonitorURL.toExternalForm());
        }

        // 3: Flink's Mesos ResourceManager
        LOG.debug("Starting Mesos Flink Resource Manager");

        // create the worker store to persist task information across restarts
        MesosWorkerStore workerStore = mesosServices.createMesosWorkerStore(config, ioExecutor);

        // we need the leader retrieval service here to be informed of new
        // leader session IDs, even though there can be only one leader ever
        LeaderRetrievalService leaderRetriever = LeaderRetrievalUtils.createLeaderRetrievalService(config,
                jobManager);

        Props resourceMasterProps = MesosFlinkResourceManager.createActorProps(getResourceManagerClass(),
                config, mesosConfig, workerStore, leaderRetriever, taskManagerParameters,
                taskManagerContainerSpec, artifactServer, LOG);

        ActorRef resourceMaster = actorSystem.actorOf(resourceMasterProps, "Mesos_Resource_Master");

        // 4: Process reapers
        // The process reapers ensure that upon unexpected actor death, the process exits
        // and does not stay lingering around unresponsive

        LOG.debug("Starting process reapers for JobManager");

        actorSystem.actorOf(Props.create(ProcessReaper.class, resourceMaster, LOG, ACTOR_DIED_EXIT_CODE),
                "Mesos_Resource_Master_Process_Reaper");

        actorSystem.actorOf(Props.create(ProcessReaper.class, jobManager, LOG, ACTOR_DIED_EXIT_CODE),
                "JobManager_Process_Reaper");
    } catch (Throwable t) {
        // make sure that everything whatever ends up in the log
        LOG.error("Mesos JobManager initialization failed", t);

        if (webMonitor != null) {
            try {
                webMonitor.stop();
            } catch (Throwable ignored) {
                LOG.warn("Failed to stop the web frontend", ignored);
            }
        }

        if (artifactServer != null) {
            try {
                artifactServer.stop();
            } catch (Throwable ignored) {
                LOG.error("Failed to stop the artifact server", ignored);
            }
        }

        if (actorSystem != null) {
            try {
                actorSystem.shutdown();
            } catch (Throwable tt) {
                LOG.error("Error shutting down actor system", tt);
            }
        }

        if (futureExecutor != null) {
            try {
                futureExecutor.shutdownNow();
            } catch (Throwable tt) {
                LOG.error("Error shutting down future executor", tt);
            }
        }

        if (ioExecutor != null) {
            try {
                ioExecutor.shutdownNow();
            } catch (Throwable tt) {
                LOG.error("Error shutting down io executor", tt);
            }
        }

        if (mesosServices != null) {
            try {
                mesosServices.close(false);
            } catch (Throwable tt) {
                LOG.error("Error closing the mesos services.", tt);
            }
        }

        return INIT_ERROR_EXIT_CODE;
    }

    // everything started, we can wait until all is done or the process is killed
    LOG.info("Mesos JobManager started");

    // wait until everything is done
    actorSystem.awaitTermination();

    // if we get here, everything worked out jolly all right, and we even exited smoothly
    if (webMonitor != null) {
        try {
            webMonitor.stop();
        } catch (Throwable t) {
            LOG.error("Failed to stop the web frontend", t);
        }
    }

    try {
        artifactServer.stop();
    } catch (Throwable t) {
        LOG.error("Failed to stop the artifact server", t);
    }

    org.apache.flink.runtime.concurrent.Executors.gracefulShutdown(AkkaUtils.getTimeout(config).toMillis(),
            TimeUnit.MILLISECONDS, futureExecutor, ioExecutor);

    try {
        mesosServices.close(true);
    } catch (Throwable t) {
        LOG.error("Failed to clean up and close MesosServices.", t);
    }

    return 0;
}

From source file: edu.umass.cs.gigapaxos.PaxosManager.java

/**
 * This test method is deprecated and will either be removed or
 * significantly revamped. Use TESTPaxosMain instead to run a single machine
 * test with multiple virtual nodes.
 * 
 * @param args
 * @throws InterruptedException
 * @throws IOException
 * @throws JSONException
 */
@Deprecated
static void test(String[] args) throws InterruptedException, IOException, JSONException {
    int[] members = TESTPaxosConfig.getDefaultGroup();
    int numNodes = members.length;

    SampleNodeConfig<Integer> snc = new SampleNodeConfig<Integer>(2000);
    snc.localSetup(Util.arrayToIntSet(members));

    @SuppressWarnings("unchecked")
    PaxosManager<Integer>[] pms = new PaxosManager[numNodes];
    TESTPaxosApp[] apps = new TESTPaxosApp[numNodes];

    /* We always test with the first member crashed. This also ensures that
     * the system is fault-tolerant to the failure of the default
     * coordinator, which in our policy is the first (or lowest numbered)
     * node. */
    TESTPaxosConfig.crash(members[0]);
    /* We disable sending replies to client in PaxosManager's unit-test. To
     * test with clients, we rely on other tests in TESTPaxosMain
     * (single-machine) or on TESTPaxosNode and TESTPaxosClient for
     * distributed testing. */
    TESTPaxosConfig.setSendReplyToClient(false);

    /* This setting is "guilty until proven innocent", i.e., each node will
     * start out assuming that all other nodes are dead. This is probably
     * too pessimistic as it will cause every node to run for coordinator
     * when it starts up but is good for testing. */
    FailureDetection.setParanoid();

    // Set up paxos managers and apps with nio
    for (int i = 0; i < numNodes; i++) {
        System.out.println("Initiating PaxosManager at node " + members[i]);
        JSONNIOTransport<Integer> niot = new JSONNIOTransport<Integer>(members[i], snc,
                new PacketDemultiplexerDefault(), true);
        apps[i] = new TESTPaxosApp(niot); // app, PM reuse nio
        pms[i] = new PaxosManager<Integer>(members[i], snc, niot, apps[i]);
    }

    System.out.println("Initiated all " + numNodes + " paxos managers with failure detectors..\n");

    /* We don't rigorously test with multiple groups as they are
     * independent, but this is useful for memory testing. */
    int numPaxosGroups = 2;
    String[] names = new String[numPaxosGroups];
    for (int i = 0; i < names.length; i++)
        names[i] = "paxos" + i;

    System.out.println("Creating " + numPaxosGroups + " paxos groups each with " + numNodes
            + " members each, one each at each of the " + numNodes + " nodes");
    for (int node = 0; node < numNodes; node++) {
        int k = 1;
        for (int group = 0; group < numPaxosGroups; group++) {
            // creating a paxos instance may induce recovery from disk
            pms[node].createPaxosInstance(names[group], 0, Util.arrayToIntSet(members), apps[node], null, null,
                    false);
            if (numPaxosGroups > 1000 && ((group % k == 0 && ((k *= 2) > 0)) || group % 100000 == 0)) {
                System.out.print(group + " ");
            }
        }
        System.out.println("..node" + members[node] + " done");
    }
    Thread.sleep(1000);

    /* Wait for all paxos managers to finish recovery. Recovery is finished
     * when initiateRecovery() is complete. At this point, all the paxos
     * groups at that node would have also rolled forward. */
    int maxRecoverySlot = -1;
    int maxRecoveredNode = -1;
    for (int i = 0; i < numNodes; i++) {
        while (!TESTPaxosConfig.isCrashed(members[i]) && !TESTPaxosConfig.getRecovered(members[i], names[0])) {
            log.info("Waiting for node " + members[i] + " to recover ");
            pms[i].waitToRecover();
        }
        log.info("Node" + members[i] + " finished recovery including rollback;\n" + names[0]
                + " recovered at slot " + apps[i].getNumCommitted(names[0]));
        // need max recovery slot for names[0] below
        maxRecoverySlot = Math.max(maxRecoverySlot, apps[i].getNumCommitted(names[0]));
        maxRecoveredNode = i;
    }

    System.out.println("all nodes done creating groups.");

    /*********** Finished creating paxos instances for testing *****************/

    /************* Begin ClientRequestTask **************************/
    ScheduledExecutorService execpool = Executors.newScheduledThreadPool(5);
    class ClientRequestTask implements Runnable {
        private final RequestPacket request;
        private final PaxosManager<Integer> paxosManager;

        ClientRequestTask(RequestPacket req, PaxosManager<Integer> pm) {
            request = req;
            paxosManager = pm;
        }

        public void run() {
            try {
                JSONObject reqJson = request.toJSONObject();
                JSONPacket.putPacketType(reqJson, PaxosPacketType.PAXOS_PACKET.getInt());
                paxosManager.propose(request.getPaxosID(), request, null);
            } catch (JSONException e) {
                e.printStackTrace();
            }
        }
    }
    /************* End ClientRequestTask **************************/

    /* Create and schedule requests. All requests are scheduled immediately
     * to test concurrency */
    int numRequests = 1000;
    RequestPacket[] reqs = new RequestPacket[numRequests];
    ScheduledFuture<?>[] futures = new ScheduledFuture[numRequests];
    int numExceptions = 0;
    double scheduledDelay = 0;
    for (int i = 0; i < numRequests; i++) {
        reqs[i] = new RequestPacket(i, "[ Sample write request numbered " + i + " ]", false);
        reqs[i].putPaxosID(names[0], 0);
        JSONObject reqJson = reqs[i].toJSONObject();
        JSONPacket.putPacketType(reqJson, PaxosPacketType.PAXOS_PACKET.getInt());
        try {
            ClientRequestTask crtask = new ClientRequestTask(reqs[i], pms[1]);
            futures[i] = (ScheduledFuture<?>) execpool.schedule(crtask, (long) scheduledDelay,
                    TimeUnit.MILLISECONDS);
            scheduledDelay += 0;
        } catch (Exception e) {
            e.printStackTrace();
            continue;
        }
    }
    /* Any exceptions below could occur because of exceptions inside paxos.
     * Scheduling a request will invoke PaxosManager.propose() that will
     * cause it to send the request to the corresponding
     * PaxosInstanceStateMachine. */
    log.info("Waiting for request scheduling to complete.");
    for (int i = 0; i < numRequests; i++) {
        try {
            futures[i].get();
        } catch (Exception e) {
            e.printStackTrace();
            numExceptions++;
        }
    }
    log.info("Request scheduling complete; numExceptions=" + numExceptions);
    Thread.sleep(1000);

    /* Wait for scheduled requests to finish being processed by paxos. We
     * check for this by checking that at least one node has executed up to
     * the slot number maxRecoverySlot + numRequests. */
    while (apps[maxRecoveredNode].getNumCommitted(names[0]) < maxRecoverySlot + numRequests) {
        apps[maxRecoveredNode].waitToFinish();
    }
    log.info("Node" + maxRecoveredNode + " has executed up to slot " + (maxRecoverySlot + numRequests));

    /* The code below waits for all uncrashed replicas to finish executing
     * up to the same slot and will then assert the SMR invariant, i.e.,
     * they all made the same state transitions up to that slot. */
    int numCommitted = 0;
    for (int i = 0; i < numNodes; i++) {
        for (int j = i + 1; j < numNodes; j++) {
            if (TESTPaxosConfig.isCrashed(members[i]) || TESTPaxosConfig.isCrashed(members[j]))
                continue; // ignore crashed nodes

            int committed1 = apps[i].getNumCommitted(names[0]);
            int committed2 = apps[j].getNumCommitted(names[0]);
            // Wait for the other node to catch up
            while (committed1 != committed2) {
                if (committed1 > committed2)
                    apps[j].waitToFinish(names[0], committed1);
                else if (committed1 < committed2)
                    apps[i].waitToFinish(names[0], committed2);
                log.info("Waiting : (slot1,hash1)=(" + committed1 + "," + apps[i].getHash(names[0])
                        + "(; (slot2,hash2=" + committed2 + "," + apps[j].getHash(names[0]) + ")");
                Thread.sleep(1000);
                committed1 = apps[i].getNumCommitted(names[0]);
                committed2 = apps[j].getNumCommitted(names[0]);
            }
            // Both nodes caught up to the same slot
            assert (committed1 == committed2) : "numCommitted@" + i + "=" + committed1 + ", numCommitted@" + j
                    + "=" + committed2;
            // Assert state machine replication invariant
            numCommitted = apps[i].getNumCommitted(names[0]);
            assert (apps[i].getHash(names[0]) == apps[j].getHash(names[0])) : ("Waiting : (slot1,hash1)=("
                    + committed1 + "," + apps[i].getHash(names[0]) + "(; (slot2,hash2=" + committed2 + ","
                    + apps[j].getHash(names[0]) + ")");
            // end of SMR invariant
        }
    }

    /* Print preempted requests if any. These could happen during
     * coordinator changes. Preempted requests are converted to no-ops and
     * forwarded to the current presumed coordinator by paxos. */
    String preemptedReqs = "[ ";
    int numPreempted = 0;
    for (int i = 0; i < numRequests; i++) {
        if (!TESTPaxosConfig.isCommitted(reqs[i].requestID)) {
            preemptedReqs += (i + " ");
            numPreempted++;
        }
    }
    preemptedReqs += "]";

    System.out.println("\n\nTest completed. Executed " + numCommitted + " requests consistently including "
            + (numRequests - numPreempted) + " of " + numRequests + " received requests;\nPreempted requests = "
            + preemptedReqs + "; numExceptions=" + numExceptions + "; average message log time="
            + Util.df(DelayProfiler.get("logDelay")) + "ms.\n"
            + "\nNote that it is possible for the test to be successful even if the number of consistently\n"
            + "executed requests is less than the number of received requests as paxos only guarantees\n"
            + "consistency, i.e., that all replicas executed requests in the same order, not that all requests\n"
            + "issued will get executed. The latter property can be achieved by clients reissuing requests\n"
            + "until successfully executed. With reissuals, clients do need to worry about double execution,\n"
            + "so they should be careful. A client is not guaranteed to get a failure message if the request fails,\n"
            + "e.g., if the replica receiving a request dies immediately. If the client uses a timeout to detect\n"
            + "failure and thereupon reissue its request, it is possible that both the original and re-issued\n"
            + "requests are executed. Clients can get around this problem by using sequence numbers within\n"
            + "their app, reading the current sequence number, and then trying to commit their write provided the\n"
            + "sequence number has not changed in the meantime. There are other alternatives, but all of these\n"
            + "are application-specific; they are not paxos's problem\n");
    for (int i = 0; i < numNodes; i++) {
        System.out.println(pms[i].printLog(names[0]));
    }
    execpool.shutdownNow();
    for (PaxosManager<Integer> pm : pms)
        pm.close();
}