Example usage for java.util.concurrent CountDownLatch getCount

List of usage examples for java.util.concurrent CountDownLatch getCount

Introduction

On this page you can find example usages of java.util.concurrent CountDownLatch getCount.

Prototype

public long getCount() 

Document

Returns the current count.
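
As a minimal, self-contained sketch (the class name GetCountDemo is illustrative only and does not come from the source files below): getCount() reports how many countDown() calls are still outstanding, and once the count reaches zero it stays there, since counting down past zero is a no-op.

import java.util.concurrent.CountDownLatch;

public class GetCountDemo {

    public static void main(String[] args) {
        CountDownLatch latch = new CountDownLatch(3);
        System.out.println(latch.getCount()); // prints 3

        latch.countDown();
        latch.countDown();
        System.out.println(latch.getCount()); // prints 1

        latch.countDown();
        System.out.println(latch.getCount()); // prints 0

        latch.countDown(); // counting down past zero has no effect
        System.out.println(latch.getCount()); // still prints 0
    }
}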

Usage

From source file:org.springframework.integration.ip.tcp.connection.CachingClientConnectionFactoryTests.java

@Test
public void testCachedFailoverRealBadHost() throws Exception {
    TcpNetServerConnectionFactory server1 = new TcpNetServerConnectionFactory(0);
    server1.setBeanName("server1");
    final CountDownLatch latch1 = new CountDownLatch(3);
    server1.registerListener(message -> {
        latch1.countDown();
        return false;
    });
    server1.start();
    TestingUtilities.waitListening(server1, 10000L);
    int port1 = server1.getPort();
    TcpNetServerConnectionFactory server2 = new TcpNetServerConnectionFactory(0);
    server2.setBeanName("server2");
    final CountDownLatch latch2 = new CountDownLatch(2);
    server2.registerListener(message -> {
        latch2.countDown();
        return false;
    });
    server2.start();
    TestingUtilities.waitListening(server2, 10000L);
    int port2 = server2.getPort();
    // Failover
    AbstractClientConnectionFactory factory1 = new TcpNetClientConnectionFactory("junkjunk", port1);
    factory1.setBeanName("client1");
    factory1.registerListener(message -> false);
    AbstractClientConnectionFactory factory2 = new TcpNetClientConnectionFactory("localhost", port2);
    factory2.setBeanName("client2");
    factory2.registerListener(message -> false);
    List<AbstractClientConnectionFactory> factories = new ArrayList<AbstractClientConnectionFactory>();
    factories.add(factory1);
    factories.add(factory2);
    FailoverClientConnectionFactory failoverFactory = new FailoverClientConnectionFactory(factories);

    // Cache
    CachingClientConnectionFactory cachingFactory = new CachingClientConnectionFactory(failoverFactory, 2);
    cachingFactory.start();
    TcpConnection conn1 = cachingFactory.getConnection();
    GenericMessage<String> message = new GenericMessage<String>("foo");
    conn1.send(message);
    conn1.close();
    TcpConnection conn2 = cachingFactory.getConnection();
    assertSame(((TcpConnectionInterceptorSupport) conn1).getTheConnection(),
            ((TcpConnectionInterceptorSupport) conn2).getTheConnection());
    conn2.send(message);
    conn1 = cachingFactory.getConnection();
    assertNotSame(((TcpConnectionInterceptorSupport) conn1).getTheConnection(),
            ((TcpConnectionInterceptorSupport) conn2).getTheConnection());
    conn1.send(message);
    conn1.close();
    conn2.close();
    assertTrue(latch2.await(10, TimeUnit.SECONDS));
    assertEquals(3, latch1.getCount());
    server1.stop();
    server2.stop();
}

From source file:org.kurento.test.functional.recorder.BaseRecorder.java

protected void launchBrowser(MediaPipeline mp, WebRtcEndpoint webRtcEp, PlayerEndpoint playerEp,
        RecorderEndpoint recorderEp, String expectedVideoCodec, String expectedAudioCodec, String recordingFile,
        Color expectedColor, int xColor, int yColor, int playTime) throws InterruptedException {

    Timer gettingStats = new Timer();
    final CountDownLatch errorContinuityAudiolatch = new CountDownLatch(1);

    getPage().subscribeEvents("playing");
    getPage().initWebRtc(webRtcEp, WebRtcChannel.AUDIO_AND_VIDEO, WebRtcMode.RCV_ONLY);
    playerEp.play();
    final CountDownLatch eosLatch = new CountDownLatch(1);
    playerEp.addEndOfStreamListener(new EventListener<EndOfStreamEvent>() {
        @Override
        public void onEvent(EndOfStreamEvent event) {
            eosLatch.countDown();
        }
    });

    if (recorderEp != null) {
        recorderEp.record();
    }

    // Assertions
    String inRecording = recorderEp == null ? " in the recording" : "";

    Assert.assertTrue("Not received media (timeout waiting playing event)" + inRecording,
            getPage().waitForEvent("playing"));

    if (recorderEp == null) {
        // Checking continuity of the audio
        getPage().activatePeerConnectionInboundStats("webRtcPeer.peerConnection");

        gettingStats.schedule(new CheckAudioTimerTask(errorContinuityAudiolatch, getPage()), 100, 200);
    }

    Assert.assertTrue(
            "Color at coordinates " + xColor + "," + yColor + " must be " + expectedColor + inRecording,
            getPage().similarColorAt(expectedColor, xColor, yColor));
    Assert.assertTrue("Not received EOS event in player" + inRecording,
            eosLatch.await(getPage().getTimeout(), TimeUnit.SECONDS));

    final CountDownLatch recorderLatch = new CountDownLatch(1);
    if (recorderEp != null) {

        saveGstreamerDot(mp);

        recorderEp.stopAndWait(new Continuation<Void>() {

            @Override
            public void onSuccess(Void result) throws Exception {
                recorderLatch.countDown();
            }

            @Override
            public void onError(Throwable cause) throws Exception {
                recorderLatch.countDown();
            }
        });

        Assert.assertTrue("Not stop properly", recorderLatch.await(getPage().getTimeout(), TimeUnit.SECONDS));

        // Wait until file exists
        waitForFileExists(recordingFile);

        AssertMedia.assertCodecs(recordingFile, expectedVideoCodec, expectedAudioCodec);
        AssertMedia.assertDuration(recordingFile, TimeUnit.SECONDS.toMillis(playTime),
                TimeUnit.SECONDS.toMillis(getPage().getThresholdTime()));

    } else {
        gettingStats.cancel();
        getPage().stopPeerConnectionInboundStats("webRtcPeer.peerConnection");
        double currentTime = getPage().getCurrentTime();
        Assert.assertTrue("Error in play time in the recorded video (expected: " + playTime + " sec, real: "
                + currentTime + " sec) " + inRecording, getPage().compare(playTime, currentTime));

        if (recorderEp == null) {
            Assert.assertTrue("Check audio. There were more than 2 seconds without receiving packets",
                    errorContinuityAudiolatch.getCount() == 1);
        }

    }
}

From source file:com.mozilla.bagheera.consumer.KafkaConsumer.java

@Override
public void poll() {
    final CountDownLatch latch = new CountDownLatch(streams.size());
    for (final KafkaStream<byte[], byte[]> stream : streams) {
        workers.add(executor.submit(new Callable<Void>() {
            @Override
            public Void call() {
                try {
                    for (MessageAndMetadata<byte[], byte[]> mam : stream) {
                        BagheeraMessage bmsg = BagheeraMessage.parseFrom(mam.message());
                        // get the sink for this message's namespace 
                        // (typically only one sink unless a regex pattern was used to listen to multiple topics)
                        KeyValueSink sink = sinkFactory.getSink(bmsg.getNamespace());
                        if (sink == null) {
                            LOG.error("Could not obtain sink for namespace: " + bmsg.getNamespace());
                            break;
                        }
                        if (bmsg.getOperation() == Operation.CREATE_UPDATE && bmsg.hasId()
                                && bmsg.hasPayload()) {
                            if (validationPipeline == null
                                    || validationPipeline.isValid(bmsg.getPayload().toByteArray())) {
                                if (bmsg.hasTimestamp()) {
                                    sink.store(bmsg.getId(), bmsg.getPayload().toByteArray(),
                                            bmsg.getTimestamp());
                                } else {
                                    sink.store(bmsg.getId(), bmsg.getPayload().toByteArray());
                                }
                            } else {
                                invalidMessageMeter.mark();
                                // TODO: sample out an example payload
                                LOG.warn("Invalid payload for namespace: " + bmsg.getNamespace());
                            }
                        } else if (bmsg.getOperation() == Operation.DELETE && bmsg.hasId()) {
                            sink.delete(bmsg.getId());
                        }
                        consumed.mark();
                    }
                } catch (InvalidProtocolBufferException e) {
                    LOG.error("Invalid protocol buffer in data stream", e);
                } catch (UnsupportedEncodingException e) {
                    LOG.error("Message ID was not in UTF-8 encoding", e);
                } catch (IOException e) {
                    LOG.error("IO error while storing to data sink", e);
                } finally {
                    latch.countDown();
                }

                return null;
            }
        }));
    }

    // Wait for all tasks to complete. In the normal case they will run
    // indefinitely, so this loop only exits when we detect that a thread has died.
    try {
        while (true) {
            latch.await(10, TimeUnit.SECONDS);
            if (latch.getCount() != streams.size()) {
                // we have a dead thread and should exit
                break;
            }
        }
    } catch (InterruptedException e) {
        LOG.info("Interrupted during polling", e);
    }

    // Spit out errors if there were any
    for (Future<Void> worker : workers) {
        try {
            if (worker.isDone() && !worker.isCancelled()) {
                worker.get(1, TimeUnit.SECONDS);
            }
        } catch (InterruptedException e) {
            LOG.error("Thread was interrupted:", e);
        } catch (ExecutionException e) {
            LOG.error("Exception occurred in thread:", e);
        } catch (TimeoutException e) {
            LOG.error("Timed out waiting for thread result:", e);
        } catch (CancellationException e) {
            LOG.error("Thread has been canceled: ", e);
        }
    }
}

From source file:net.kmycode.javaspeechserver.cloud.StreamingRecognizeClient.java

/** Send streaming recognize requests to server. */
public void recognize() throws InterruptedException, IOException {
    final AudioRecorder recorder = AudioRecorder.getDefault();
    final StopWatch stopwatch = new StopWatch();

    final CountDownLatch finishLatch = new CountDownLatch(1);
    StreamObserver<StreamingRecognizeResponse> responseObserver = new StreamObserver<StreamingRecognizeResponse>() {
        private int sentenceLength = 1;

        /**
        * Prints the transcription results. Interim results are overwritten by subsequent
        * results, until a final one is returned, at which point we start a new line.
        *
        * Flags the program to exit when it hears "exit".
        */
        @Override
        public void onNext(StreamingRecognizeResponse response) {

            byteStringQueue.clear();
            stopwatch.reset();

            List<StreamingRecognitionResult> results = response.getResultsList();
            if (results.size() < 1) {
                return;
            }

            StreamingRecognitionResult result = results.get(0);
            String transcript = result.getAlternatives(0).getTranscript();

            // Print interim results with a line feed, so subsequent transcriptions will overwrite
            // it. Final result will print a newline.
            String format = "%-" + this.sentenceLength + 's';
            format += " (" + result.getAlternatives(0).getConfidence() + ") ";
            if (result.getIsFinal()) {
                format += '\n';
                this.sentenceLength = 1;
                finishLatch.countDown();
            } else {
                format += '\r';
                this.sentenceLength = transcript.length();
            }
            System.out.print(String.format(format, transcript));
        }

        @Override
        public void onError(Throwable error) {
            logger.log(Level.ERROR, "recognize failed: {0}", error);
            finishLatch.countDown();
        }

        @Override
        public void onCompleted() {
            logger.info("recognize completed.");
            finishLatch.countDown();
        }
    };

    this.requestObserver = this.speechClient.streamingRecognize(responseObserver);
    try {
        // Build and send a StreamingRecognizeRequest containing the parameters for
        // processing the audio.
        RecognitionConfig config = RecognitionConfig.newBuilder()
                .setEncoding(RecognitionConfig.AudioEncoding.LINEAR16).setSampleRate(recorder.getSamplingRate())
                .setLanguageCode("ja-JP").build();
        StreamingRecognitionConfig streamingConfig = StreamingRecognitionConfig.newBuilder().setConfig(config)
                .setInterimResults(true).setSingleUtterance(false).build();

        StreamingRecognizeRequest initial = StreamingRecognizeRequest.newBuilder()
                .setStreamingConfig(streamingConfig).build();
        requestObserver.onNext(initial);

        while (this.byteStringQueue.size() > 0) {
            ByteString data = this.byteStringQueue.poll();
            this.request(data);
        }

        // Read and send sequential buffers of audio as additional RecognizeRequests.
        while (finishLatch.getCount() > 0 && recorder.read()) {
            if (recorder.isSound()) {
                ByteString data = this.recorder.getBufferAsByteString();
                this.byteStringQueue.add(data);

                if (!stopwatch.isStarted()) {
                    stopwatch.start();
                } else if (stopwatch.getTime() > 2000) {
                    this.byteStringQueue.clear();
                    break;
                }

                this.request(data);
            } else {
                this.notSoundCount++;
                if (this.notSoundCount >= 3) {
                    // stop recognition
                    break;
                }
            }
        }
    } catch (RuntimeException e) {
        // Cancel RPC.
        requestObserver.onError(e);
        throw e;
    }
    // Mark the end of requests.
    requestObserver.onCompleted();

    // Receiving happens asynchronously.
    finishLatch.await(1, TimeUnit.MINUTES);
}

From source file:org.killbill.queue.DefaultQueueLifecycle.java

@Override
public boolean startQueue() {
    if (config.isProcessingOff() || !isStarted.compareAndSet(false, true)) {
        return false;
    }

    isProcessingEvents = true;
    curActiveThreads = 0;

    final DefaultQueueLifecycle thePersistentQ = this;
    final CountDownLatch doneInitialization = new CountDownLatch(nbThreads);

    log.info(String.format("%s: Starting with %d threads", svcQName, nbThreads));

    for (int i = 0; i < nbThreads; i++) {
        executor.execute(new Runnable() {
            @Override
            public void run() {

                log.info(String.format("%s: Thread %s [%d] starting", svcQName,
                        Thread.currentThread().getName(), Thread.currentThread().getId()));

                synchronized (thePersistentQ) {
                    curActiveThreads++;
                }

                doneInitialization.countDown();

                try {
                    while (true) {
                        if (!isProcessingEvents) {
                            break;
                        }

                        final long beforeLoop = System.nanoTime();
                        try {
                            if (!isProcessingSuspended.get()) {
                                doProcessEvents();
                            }
                        } catch (Exception e) {
                            log.warn(String.format(
                                    "%s: Thread  %s  [%d] got an exception, catching and moving on...",
                                    svcQName, Thread.currentThread().getName(), Thread.currentThread().getId()),
                                    e);
                        } finally {
                            final long afterLoop = System.nanoTime();
                            sleepALittle((afterLoop - beforeLoop) / ONE_MILLION);
                        }
                    }
                } catch (InterruptedException e) {
                    log.info(String.format("%s: Thread %s got interrupted, exiting... ", svcQName,
                            Thread.currentThread().getName()));
                } catch (Throwable e) {
                    log.error(String.format("%s: Thread %s got an exception, exiting... ", svcQName,
                            Thread.currentThread().getName()), e);
                } finally {
                    log.info(String.format("%s: Thread %s has exited", svcQName,
                            Thread.currentThread().getName()));
                    synchronized (thePersistentQ) {
                        curActiveThreads--;
                        thePersistentQ.notify();
                    }
                }
            }

            private void sleepALittle(long loopTimeMsec) throws InterruptedException {
                final long remainingSleepTime = config.getSleepTimeMs() - loopTimeMsec;
                if (remainingSleepTime > 0) {
                    Thread.sleep(remainingSleepTime);
                }
            }
        });
    }
    try {
        final boolean success = doneInitialization.await(waitTimeoutMs, TimeUnit.MILLISECONDS);
        if (!success) {

            log.warn(String.format("%s: Failed to wait for all threads to be started, got %d/%d", svcQName,
                    (nbThreads - doneInitialization.getCount()), nbThreads));
        } else {
            log.info(String.format("%s: Done waiting for all threads to be started, got %d/%d", svcQName,
                    (nbThreads - doneInitialization.getCount()), nbThreads));
        }
    } catch (InterruptedException e) {
        log.warn(String.format("%s: Start sequence, got interrupted", svcQName));
    }
    return true;
}

From source file:org.apache.zookeeper.RemoveWatchesTest.java

/**
 * Test verifies WatcherType.Any - removes all the configured child,data
 * watcher functions
 */
@Test(timeout = 90000)
public void testRemoveAllWatchesOnAPath() throws Exception {
    zk1.create("/node1", null, Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
    final CountDownLatch watchCount = new CountDownLatch(2);
    final CountDownLatch rmWatchCount = new CountDownLatch(4);
    Watcher w1 = new Watcher() {
        @Override
        public void process(WatchedEvent event) {
            switch (event.getType()) {
            case ChildWatchRemoved:
            case DataWatchRemoved:
                rmWatchCount.countDown();
                break;
            case NodeChildrenChanged:
            case NodeDataChanged:
                watchCount.countDown();
                break;
            default:
                break;
            }
        }
    };
    Watcher w2 = new Watcher() {
        @Override
        public void process(WatchedEvent event) {
            switch (event.getType()) {
            case ChildWatchRemoved:
            case DataWatchRemoved:
                rmWatchCount.countDown();
                break;
            case NodeChildrenChanged:
            case NodeDataChanged:
                watchCount.countDown();
                break;
            default:
                break;
            }
        }
    };
    // Add multiple child watches
    LOG.info("Adding child watcher {} on path {}", new Object[] { w1, "/node1" });
    Assert.assertEquals("Didn't set child watches", 0, zk2.getChildren("/node1", w1).size());
    LOG.info("Adding child watcher {} on path {}", new Object[] { w2, "/node1" });
    Assert.assertEquals("Didn't set child watches", 0, zk2.getChildren("/node1", w2).size());

    // Add multiple data watches
    LOG.info("Adding data watcher {} on path {}", new Object[] { w1, "/node1" });
    Assert.assertNotNull("Didn't set data watches", zk2.exists("/node1", w1));
    LOG.info("Adding data watcher {} on path {}", new Object[] { w2, "/node1" });
    Assert.assertNotNull("Didn't set data watches", zk2.exists("/node1", w2));

    Assert.assertTrue("Server session is not a watcher",
            isServerSessionWatcher(zk2.getSessionId(), "/node1", WatcherType.Data));
    removeAllWatches(zk2, "/node1", WatcherType.Any, false, Code.OK);
    Assert.assertTrue("Didn't remove data watcher",
            rmWatchCount.await(CONNECTION_TIMEOUT, TimeUnit.MILLISECONDS));
    Assert.assertFalse("Server session is still a watcher after removal",
            isServerSessionWatcher(zk2.getSessionId(), "/node1", WatcherType.Data));
    Assert.assertEquals("Received watch notification after removal!", 2, watchCount.getCount());
}

From source file:org.apache.hadoop.hbase.procedure.TestZKProcedure.java

/**
 * Test a distributed commit with multiple cohort members, where one of the cohort members has a
 * timeout exception during the prepare stage.
 */
@Test
public void testMultiCohortWithMemberTimeoutDuringPrepare() throws Exception {
    String opDescription = "error injection coordination";
    String[] cohortMembers = new String[] { "one", "two", "three" };
    List<String> expected = Lists.newArrayList(cohortMembers);
    // error constants
    final int memberErrorIndex = 2;
    final CountDownLatch coordinatorReceivedErrorLatch = new CountDownLatch(1);

    // start running the coordinator and its controller
    ZooKeeperWatcher coordinatorWatcher = newZooKeeperWatcher();
    ZKProcedureCoordinatorRpcs coordinatorController = new ZKProcedureCoordinatorRpcs(coordinatorWatcher,
            opDescription, COORDINATOR_NODE_NAME);
    ThreadPoolExecutor pool = ProcedureCoordinator.defaultPool(COORDINATOR_NODE_NAME, POOL_SIZE, KEEP_ALIVE);
    ProcedureCoordinator coordinator = spy(new ProcedureCoordinator(coordinatorController, pool));

    // start a member for each node
    SubprocedureFactory subprocFactory = Mockito.mock(SubprocedureFactory.class);
    List<Pair<ProcedureMember, ZKProcedureMemberRpcs>> members = new ArrayList<Pair<ProcedureMember, ZKProcedureMemberRpcs>>(
            expected.size());
    for (String member : expected) {
        ZooKeeperWatcher watcher = newZooKeeperWatcher();
        ZKProcedureMemberRpcs controller = new ZKProcedureMemberRpcs(watcher, opDescription);
        ThreadPoolExecutor pool2 = ProcedureMember.defaultPool(member, 1, KEEP_ALIVE);
        ProcedureMember mem = new ProcedureMember(controller, pool2, subprocFactory);
        members.add(new Pair<ProcedureMember, ZKProcedureMemberRpcs>(mem, controller));
        controller.start(member, mem);
    }

    // setup mock subprocedures
    final List<Subprocedure> cohortTasks = new ArrayList<Subprocedure>();
    final int[] elem = new int[1];
    for (int i = 0; i < members.size(); i++) {
        ForeignExceptionDispatcher cohortMonitor = new ForeignExceptionDispatcher();
        final ProcedureMember comms = members.get(i).getFirst();
        Subprocedure commit = Mockito
                .spy(new SubprocedureImpl(comms, opName, cohortMonitor, WAKE_FREQUENCY, TIMEOUT));
        // This nasty bit has one of the impls throw a TimeoutException
        Mockito.doAnswer(new Answer<Void>() {
            @Override
            public Void answer(InvocationOnMock invocation) throws Throwable {
                int index = elem[0];
                if (index == memberErrorIndex) {
                    LOG.debug("Sending error to coordinator");
                    ForeignException remoteCause = new ForeignException("TIMER",
                            new TimeoutException("subprocTimeout", 1, 2, 0));
                    Subprocedure r = ((Subprocedure) invocation.getMock());
                    LOG.error("Remote commit failure, not propagating error:" + remoteCause);
                    comms.receiveAbortProcedure(r.getName(), remoteCause);
                    assertEquals(r.isComplete(), true);
                    // don't complete the error phase until the coordinator has gotten the error
                    // notification (which ensures that we never progress past prepare)
                    try {
                        Procedure.waitForLatch(coordinatorReceivedErrorLatch, new ForeignExceptionDispatcher(),
                                WAKE_FREQUENCY, "coordinator received error");
                    } catch (InterruptedException e) {
                        LOG.debug("Wait for latch interrupted, done:"
                                + (coordinatorReceivedErrorLatch.getCount() == 0));
                        // reset the interrupt status on the thread
                        Thread.currentThread().interrupt();
                    }
                }
                elem[0] = ++index;
                return null;
            }
        }).when(commit).acquireBarrier();
        cohortTasks.add(commit);
    }

    // pass out a task per member
    final int[] i = new int[] { 0 };
    Mockito.when(subprocFactory.buildSubprocedure(Mockito.eq(opName),
            (byte[]) Mockito.argThat(new ArrayEquals(data)))).thenAnswer(new Answer<Subprocedure>() {
                @Override
                public Subprocedure answer(InvocationOnMock invocation) throws Throwable {
                    int index = i[0];
                    Subprocedure commit = cohortTasks.get(index);
                    index++;
                    i[0] = index;
                    return commit;
                }
            });

    // setup spying on the coordinator
    ForeignExceptionDispatcher coordinatorTaskErrorMonitor = Mockito.spy(new ForeignExceptionDispatcher());
    Procedure coordinatorTask = Mockito.spy(new Procedure(coordinator, coordinatorTaskErrorMonitor,
            WAKE_FREQUENCY, TIMEOUT, opName, data, expected));
    when(coordinator.createProcedure(any(ForeignExceptionDispatcher.class), eq(opName), eq(data),
            anyListOf(String.class))).thenReturn(coordinatorTask);
    // count down the error latch when we get the remote error
    Mockito.doAnswer(new Answer<Void>() {
        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            // pass on the error to the master
            invocation.callRealMethod();
            // then count down the got error latch
            coordinatorReceivedErrorLatch.countDown();
            return null;
        }
    }).when(coordinatorTask).receive(Mockito.any(ForeignException.class));

    // ----------------------------
    // start running the operation
    // ----------------------------

    Procedure task = coordinator.startProcedure(coordinatorTaskErrorMonitor, opName, data, expected);
    assertEquals("Didn't mock coordinator task", coordinatorTask, task);

    // wait for the task to complete
    try {
        task.waitForCompleted();
    } catch (ForeignException fe) {
        // this may get caught or may not
    }

    // -------------
    // verification
    // -------------

    // always expect prepared, never committed, and possible to have cleanup and finish (racy since
    // error case)
    waitAndVerifyProc(coordinatorTask, once, never(), once, atMost(1), true);
    verifyCohortSuccessful(expected, subprocFactory, cohortTasks, once, never(), once, once, true);

    // close all the open things
    closeAll(coordinator, coordinatorController, members);
}

From source file:lcmc.cluster.ui.ClusterBrowser.java

void startCrmStatus() {
    final CountDownLatch firstTime = new CountDownLatch(1);
    final String clusterName = getCluster().getName();
    startClStatusProgressIndicator(clusterName);
    final Thread thread = new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                firstTime.await();
            } catch (final InterruptedException ignored) {
                Thread.currentThread().interrupt();
            }
            if (crmStatusFailed()) {
                progressIndicator.progressIndicatorFailed(clusterName,
                        Tools.getString("ClusterBrowser.ClusterStatusFailed"));
            } else {
                swingUtils.invokeLater(new Runnable() {
                    @Override
                    public void run() {
                        crmGraph.scale();
                    }
                });
            }
            stopClStatusProgressIndicator(clusterName);
        }
    });
    thread.start();
    crmStatusCanceledByUser = false;
    final Application.RunMode runMode = Application.RunMode.LIVE;
    while (true) {
        final Host host = getDCHost();
        if (host == null) {
            try {
                Thread.sleep(5000);
            } catch (final InterruptedException ex) {
                Thread.currentThread().interrupt();
            }
            continue;
        }
        //clStatusCanceled = false;
        host.execCrmStatusCommand(new ExecCallback() {
            @Override
            public void done(final String answer) {
                final String online = clusterStatus.isOnlineNode(host.getName());
                setCrmStatus(host, "yes".equals(online));
                firstTime.countDown();
            }

            @Override
            public void doneError(final String answer, final int exitCode) {
                if (firstTime.getCount() == 1) {
                    LOG.debug2("startClStatus: status failed: " + host.getName() + ", ec: " + exitCode);
                }
                clStatusLock();
                clusterStatus.setOnlineNode(host.getName(), "no");
                setCrmStatus(host, false);
                clusterStatus.setDC(null);
                clStatusUnlock();
                if (exitCode == 255) {
                    /* looks like connection was lost */
                    //crmGraph.repaint();
                    //host.getSSH().forceReconnect();
                    //host.setConnected();
                }
                firstTime.countDown();
            }
        },

                new NewOutputCallback() {
                    //TODO: check this buffer's size
                    private final StringBuffer clusterStatusOutput = new StringBuffer(300);

                    @Override
                    public void output(final String output) {
                        parseClusterOutput(output, clusterStatusOutput, host, firstTime, runMode);
                    }
                });
        host.waitForCrmStatusFinish();
        if (crmStatusCanceledByUser) {
            break;
        }
        try {
            Thread.sleep(5000);
        } catch (final InterruptedException ex) {
            Thread.currentThread().interrupt();
        }
    }
}

From source file:lcmc.gui.ClusterBrowser.java

/** Starts hb status. */
void startClStatus() {
    final CountDownLatch firstTime = new CountDownLatch(1);
    final String clusterName = getCluster().getName();
    startClStatusProgressIndicator(clusterName);
    final boolean testOnly = false;
    final Thread thread = new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                firstTime.await();
            } catch (InterruptedException ignored) {
                Thread.currentThread().interrupt();
            }
            if (clStatusFailed()) {
                Tools.progressIndicatorFailed(clusterName,
                        Tools.getString("ClusterBrowser.ClusterStatusFailed"));
            } else {
                SwingUtilities.invokeLater(new Runnable() {
                    @Override
                    public void run() {
                        crmGraph.scale();
                    }
                });
            }
            stopClStatusProgressIndicator(clusterName);
        }
    });
    thread.start();
    clStatusCanceled = false;
    while (true) {
        final Host host = getDCHost();
        if (host == null) {
            try {
                Thread.sleep(5000);
            } catch (InterruptedException ex) {
                Thread.currentThread().interrupt();
            }
            continue;
        }
        final String hostName = host.getName();
        //clStatusCanceled = false;
        host.execClStatusCommand(new ExecCallback() {
            @Override
            public void done(final String ans) {
                final String online = clusterStatus.isOnlineNode(host.getName());
                setClStatus(host, "yes".equals(online));
                firstTime.countDown();
            }

            @Override
            public void doneError(final String ans, final int exitCode) {
                if (firstTime.getCount() == 1) {
                    Tools.debug(this, "hb status failed: " + host.getName() + ", ec: " + exitCode, 2);
                }
                clStatusLock();
                clusterStatus.setOnlineNode(host.getName(), "no");
                setClStatus(host, false);
                clusterStatus.setDC(null);
                clStatusUnlock();
                if (exitCode == 255) {
                    /* looks like connection was lost */
                    //crmGraph.repaint();
                    //host.getSSH().forceReconnect();
                    //host.setConnected();
                }
                firstTime.countDown();
            }
        },

                new NewOutputCallback() {
                    //TODO: check this buffer's size
                    private StringBuffer clusterStatusOutput = new StringBuffer(300);

                    @Override
                    public void output(final String output) {
                        processClusterOutput(output, clusterStatusOutput, host, firstTime, testOnly);
                    }
                });
        host.waitOnClStatus();
        if (clStatusCanceled) {
            break;
        }
        try {
            Thread.sleep(5000);
        } catch (InterruptedException ex) {
            Thread.currentThread().interrupt();
        }
    }
}

From source file:edu.brown.hstore.PartitionExecutor.java

/**
 * Special function that allows us to do some utility work while we are
 * waiting for a response or something real to do.
 */
protected void utilityWork(CountDownLatch dtxnLatch) {
    // TODO: Set the txnId in our handle to be what the original txn was
    // that
    // deferred this query.

    /*
     * We need to start popping from the deferred_queue here. There is no
     * need for a while loop if we're going to requeue each popped txn in
     * the work_queue, because we know this.work_queue.isEmpty() will be
     * false as soon as we pop one local txn off of deferred_queue. We will
     * arrive back in utilityWork() when that txn finishes if no new txn's
     * have entered.
     */
    do {
        LocalTransaction ts = deferred_queue.poll();
        if (ts == null)
            break;
        this.queueNewTransaction(ts);
    } while ((dtxnLatch != null && dtxnLatch.getCount() > 0)
            || (dtxnLatch == null && this.work_queue.isEmpty()));
    // while (this.work_queue.isEmpty()) {
    // }
    // Try to free some memory
    // this.tmp_fragmentParams.reset();
    // this.tmp_serializedParams.clear();
    // this.tmp_EEdependencies.clear();
}