Example usage for java.util.concurrent CountDownLatch countDown

List of usage examples for java.util.concurrent CountDownLatch countDown

Introduction

On this page you can find example usages of java.util.concurrent.CountDownLatch#countDown(), collected from open source projects.

Prototype

public void countDown() 

Document

Decrements the count of the latch, releasing all waiting threads if the count reaches zero.
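
Before the real-world excerpts below, here is a minimal, self-contained sketch of the call in isolation (the class and variable names are illustrative, not taken from any of the sources that follow): each worker thread calls countDown(), and the main thread's await() returns once the count reaches zero.

import java.util.concurrent.CountDownLatch;

public class CountDownExample {

    public static void main(String[] args) throws InterruptedException {
        final int workers = 3;
        // Initialize the latch with the number of countDown() calls to wait for.
        final CountDownLatch latch = new CountDownLatch(workers);

        for (int i = 0; i < workers; i++) {
            final int id = i;
            new Thread(() -> {
                System.out.println("worker " + id + " finished");
                // Decrement the count; when it reaches zero, await() unblocks.
                latch.countDown();
            }).start();
        }

        latch.await(); // blocks until all workers have counted down
        System.out.println("all workers done");
    }
}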

Usage

From source file:com.astexample.Recognize.java
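
A gRPC streaming speech client. The response observer counts down a one-count latch from both onError and onCompleted, and recognize() awaits it so the method blocks until the stream terminates or a minute elapses.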

/** Send streaming recognize requests to server. */
public void recognize() throws InterruptedException, IOException {
    final CountDownLatch finishLatch = new CountDownLatch(1);
    StreamObserver<RecognizeResponse> responseObserver = new StreamObserver<RecognizeResponse>() {
        @Override
        public void onNext(RecognizeResponse response) {
            logger.info("Received response: " + TextFormat.printToString(response));
        }

        @Override
        public void onError(Throwable error) {
            Status status = Status.fromThrowable(error);
            logger.log(Level.WARNING, "recognize failed: {0}", status);
            finishLatch.countDown();
        }

        @Override
        public void onCompleted() {
            logger.info("recognize completed.");
            finishLatch.countDown();
        }
    };

    StreamObserver<RecognizeRequest> requestObserver = stub.recognize(responseObserver);
    try {
        // Build and send a RecognizeRequest containing the parameters for processing the audio.
        InitialRecognizeRequest initial = InitialRecognizeRequest.newBuilder()
                .setEncoding(AudioEncoding.LINEAR16).setSampleRate(samplingRate).setInterimResults(true)
                .build();
        RecognizeRequest firstRequest = RecognizeRequest.newBuilder().setInitialRequest(initial).build();
        requestObserver.onNext(firstRequest);

        // Open the audio file; read and send sequential buffers of audio as additional
        // RecognizeRequests. try-with-resources closes the stream even if sending fails.
        try (FileInputStream in = new FileInputStream(new File(file))) {
            // For LINEAR16 at a 16000 Hz sample rate, 3200 bytes corresponds to 100 milliseconds of audio.
            byte[] buffer = new byte[3200];
            int bytesRead;
            int totalBytes = 0;
            while ((bytesRead = in.read(buffer)) != -1) {
                totalBytes += bytesRead;
                AudioRequest audio = AudioRequest.newBuilder()
                        .setContent(ByteString.copyFrom(buffer, 0, bytesRead)).build();
                RecognizeRequest request = RecognizeRequest.newBuilder().setAudioRequest(audio).build();
                requestObserver.onNext(request);
                // To simulate real-time audio, sleep after sending each buffer:
                // samplingRate / 160 yields 100 milliseconds at 16000 Hz.
                Thread.sleep(samplingRate / 160);
            }
            logger.info("Sent " + totalBytes + " bytes from audio file: " + file);
        }
    } catch (RuntimeException e) {
        // Cancel RPC.
        requestObserver.onError(e);
        throw e;
    }
    // Mark the end of requests.
    requestObserver.onCompleted();

    // Receiving happens asynchronously; wait up to one minute for onCompleted or onError
    // to count the latch down.
    finishLatch.await(1, TimeUnit.MINUTES);
}

From source file:com.jbrisbin.vpc.jobsched.batch.BatchMessageHandler.java
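
Fan-out over RabbitMQ. The latch is sized to the number of messages in the batch; the reply listener counts it down once per result, and handleMessage() awaits (with a per-message timeout) before returning the collected results.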

public BatchMessage handleMessage(BatchMessage batch) throws Exception {
    log.debug("handling message: " + batch.toString());

    final BatchMessage results = new BatchMessage();
    results.setId(batch.getId());

    // For waiting till our results are all back
    final CountDownLatch latch = new CountDownLatch(batch.getMessages().size());

    Queue resultsQueue = rabbitAdmin.declareQueue();
    SimpleMessageListenerContainer listener = new SimpleMessageListenerContainer(connectionFactory);
    listener.setAutoAck(true);
    listener.setQueues(resultsQueue);
    listener.setMessageListener(new MessageListener() {
        public void onMessage(Message message) {
            String messageId = new String(message.getMessageProperties().getCorrelationId());
            String body = new String(message.getBody());
            results.getMessages().put(messageId, body);
            latch.countDown();
        }
    });
    listener.start();

    for (Map.Entry<String, String> msg : batch.getMessages().entrySet()) {
        final String[] parts = msg.getKey().split(":");
        template.send(parts[0], parts[1],
                new MessageCreator(parts[2], parts[3], resultsQueue.getName(), msg.getValue().getBytes()));
    }

    // Wait the timeout value per message for all results to be collected
    latch.await((batch.getTimeout() * batch.getMessages().size()), TimeUnit.MINUTES);

    return results;
}

From source file:org.eclipse.hono.example.ExampleSender.java
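
A console sender for Eclipse Hono. Each line of input gets a fresh one-count latch that is counted down when the asynchronous send completes, turning the send into a bounded blocking step (two-second timeout) in the read loop.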

/**
 * Reads user input from the console and sends it to the Hono server.
 */
@EventListener(classes = { ApplicationReadyEvent.class })
public void readMessagesFromStdin() {

    Runnable reader = new Runnable() {

        public void run() {
            try {
                // give Spring Boot some time to log its startup messages
                Thread.sleep(50);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // restore the interrupt flag
            }
            LOG.info("sender for tenant [{}] created successfully", tenantId);
            LOG.info("Enter some message(s) (hit return to send, ctrl-c to quit)");
            String input;
            Scanner scanner = new Scanner(System.in);
            do {
                input = scanner.nextLine();
                final String msg = input;
                if (!msg.isEmpty()) {

                    final Map<String, Object> properties = new HashMap<>();
                    properties.put("my_prop_string", "I'm a string");
                    properties.put("my_prop_int", 10);
                    final CountDownLatch latch = new CountDownLatch(1);
                    Future<Boolean> sendTracker = Future.future();
                    sendTracker.setHandler(s -> {
                        if (s.failed()) {
                            LOG.info(s.cause().getMessage());
                        }
                    });

                    getRegistrationAssertion().compose(token -> {
                        return send(msg, properties, token);
                    }).compose(sent -> {
                        latch.countDown();
                        sendTracker.complete();
                    }, sendTracker);

                    try {
                        if (!latch.await(2, TimeUnit.SECONDS)) {
                            sendTracker.fail("cannot connect to server");
                        }
                    } catch (InterruptedException e) {
                        // nothing to do
                    }
                }
            } while (!input.isEmpty());
            scanner.close();
        }
    };
    new Thread(reader).start();
}

From source file:de.hybris.platform.media.storage.impl.DefaultLocalMediaFileCacheServiceIntegrationTest.java
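
A test helper that runs an executor on N threads. Each worker counts down in a finally block, so the latch is decremented even when the executor throws, and the caller waits up to ten seconds for all threads to finish.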

private void runThreadsWithLatch(final int numThreads, final CacheTestExecutor executor) {
    try {
        final CountDownLatch latch = new CountDownLatch(numThreads);
        for (int i = 0; i < numThreads; i++) {
            new Thread(new Runnable() {
                @Override
                public void run() {
                    try {
                        executor.execute();
                    } finally {
                        latch.countDown();
                    }
                }
            }).start();
        }
        latch.await(10, TimeUnit.SECONDS);
    } catch (final InterruptedException e) {
        Thread.currentThread().interrupt();
    }
}

From source file:org.zodiark.subscriber.SubscriberTest.java
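
A TestNG client test. The envelope handler captures the server's response and counts down the latch, which the test awaits before asserting on the result.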

@Test(enabled = false)
public void createSessionTest() throws IOException, InterruptedException {
    final AtomicReference<SubscriberResults> answer = new AtomicReference<>();
    final ZodiarkClient publisherClient = new ZodiarkClient.Builder().path("http://127.0.0.1:" + port).build();
    final CountDownLatch latch = new CountDownLatch(1);

    publisherClient.handler(new OnEnvelopHandler() {
        @Override
        public boolean onEnvelop(Envelope e) throws IOException {
            answer.set(mapper.readValue(e.getMessage().getData(), SubscriberResults.class));
            latch.countDown();
            return true;
        }
    }).open();

    Envelope createSessionMessage = Envelope
            .newClientToServerRequest(new Message(new Path(Paths.DB_POST_SUBSCRIBER_SESSION_CREATE),
                    mapper.writeValueAsString(new UserPassword("foo", "bar"))));
    createSessionMessage.setFrom(new From(ActorValue.SUBSCRIBER));
    publisherClient.send(createSessionMessage);
    latch.await();
    assertEquals("OK", answer.get().getResults());
}

From source file:ufo.remote.calls.benchmark.server.vertx.VertxServiceImpl.java
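
Cluster bootstrap. Vert.x starts asynchronously, so a one-count latch counted down from the completion handler makes the @PostConstruct method block until the cluster node is up (or has failed to start).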

@PostConstruct
public void init() throws InterruptedException {
    final CountDownLatch latch = new CountDownLatch(1);
    final VertxOptions options = new VertxOptions();
    final Config conf = new Config();
    Vertx.clusteredVertx(options.setClusterHost("localhost").setClusterPort(0).setClustered(true)
            .setClusterManager(new HazelcastClusterManager(conf)), ar -> {
                if (ar.failed()) {
                    logger.error("Error starting Vertx cluster", ar.cause());
                    // Count down even on failure so init() below does not block forever.
                    latch.countDown();
                    return;
                }
                logger.info("Vertx cluster node started");
                vertx = ar.result();
                logger.info("Initialising vertx verticles...");
                vertx.deployVerticle(webServerVerticle);
                latch.countDown();
            });
    latch.await();
}

From source file:com.netflix.curator.framework.imps.TestFrameworkBackground.java
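
Retry verification for Curator. The latch is initialized to the expected number of retries; a debug listener records the interval between retries and counts down, and the test awaits all of them before asserting that each interval honored the retry sleep.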

@Test
public void testRetries() throws Exception {
    final int SLEEP = 1000;
    final int TIMES = 5;

    Timing timing = new Timing();
    CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), timing.session(),
            timing.connection(), new RetryNTimes(TIMES, SLEEP));
    try {
        client.start();
        client.getZookeeperClient().blockUntilConnectedOrTimedOut();

        final CountDownLatch latch = new CountDownLatch(TIMES);
        final List<Long> times = Lists.newArrayList();
        final AtomicLong start = new AtomicLong(System.currentTimeMillis());
        ((CuratorFrameworkImpl) client).debugListener = new CuratorFrameworkImpl.DebugBackgroundListener() {
            @Override
            public void listen(OperationAndData<?> data) {
                if (data.getOperation().getClass().getName().contains("CreateBuilderImpl")) {
                    long now = System.currentTimeMillis();
                    times.add(now - start.get());
                    start.set(now);
                    latch.countDown();
                }
            }
        };

        server.stop();
        client.create().inBackground().forPath("/one");

        latch.await();

        for (long elapsed : times.subList(1, times.size())) // first one isn't a retry
        {
            Assert.assertTrue(elapsed >= SLEEP, elapsed + ": " + times);
        }
    } finally {
        IOUtils.closeQuietly(client);
    }
}

From source file:com.turo.pushy.apns.ApnsClientBenchmark.java
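
A JMH benchmark for Pushy. The latch is sized to the notification batch and each successful send counts it down, so the timed method ends only after every notification has been acknowledged.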

@Benchmark
@BenchmarkMode(Mode.SingleShotTime)
@Threads(1)
@Measurement(iterations = 20, batchSize = 1)
@Warmup(iterations = 20, batchSize = 1)
public long testSendNotifications() throws InterruptedException {
    final CountDownLatch countDownLatch = new CountDownLatch(this.pushNotifications.size());

    for (final SimpleApnsPushNotification notification : this.pushNotifications) {
        this.client.sendNotification(notification).addListener(
                new GenericFutureListener<Future<PushNotificationResponse<SimpleApnsPushNotification>>>() {

                    @Override
                    public void operationComplete(
                            final Future<PushNotificationResponse<SimpleApnsPushNotification>> future) {
                        // Only successful sends count down; a failure would leave await() blocked.
                        if (future.isSuccess()) {
                            countDownLatch.countDown();
                        }
                    }
                });
    }

    countDownLatch.await();
    return countDownLatch.getCount();
}

From source file:info.archinnov.achilles.test.integration.tests.AsyncEventInterceptorIT.java
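
An asynchronous query test for Achilles. The success callback counts down a one-count latch that the test awaits before asserting that the post-load interceptor modified the entity.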

@Test
public void should_apply_post_load_interceptor_on_raw_typed_query() throws Exception {
    // Given
    CompleteBean entity = builder().randomId().name("DuyHai").label("label").buid();

    asyncManager.insert(entity).getImmediately();

    RegularStatement statement = select().from("CompleteBean").where(eq("id", bindMarker()));

    // When
    final CountDownLatch latch = new CountDownLatch(1);

    final AchillesFuture<CompleteBean> future = asyncManager
            .rawTypedQuery(CompleteBean.class, statement, entity.getId())
            .getFirst(new FutureCallback<Object>() {
                @Override
                public void onSuccess(Object result) {
                    latch.countDown();
                }

                @Override
                public void onFailure(Throwable t) {
                    // Count down on failure too, so the test fails at the assertion
                    // instead of blocking forever on latch.await().
                    latch.countDown();
                }
            });

    latch.await();

    // Then
    assertThat(future.get().getLabel()).isEqualTo("postLoad");
}

From source file:co.cask.cdap.data2.datafabric.dataset.service.DatasetServiceTestBase.java
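
Test setup for CDAP's dataset service. After starting the service, a discovery-change listener counts down a one-count latch as soon as the service is discoverable, and setup waits up to five seconds before continuing.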

@Before
public void before() throws Exception {
    CConfiguration cConf = CConfiguration.create();
    File dataDir = new File(tmpFolder.newFolder(), "data");
    cConf.set(Constants.CFG_LOCAL_DATA_DIR, dataDir.getAbsolutePath());
    if (!DirUtils.mkdirs(dataDir)) {
        throw new RuntimeException(String.format("Could not create DatasetFramework output dir %s", dataDir));
    }
    cConf.set(Constants.Dataset.Manager.OUTPUT_DIR, dataDir.getAbsolutePath());
    cConf.set(Constants.Dataset.Manager.ADDRESS, "localhost");
    cConf.setBoolean(Constants.Dangerous.UNRECOVERABLE_RESET, true);

    // Starting DatasetService service
    discoveryService = new InMemoryDiscoveryService();
    MetricsCollectionService metricsCollectionService = new NoOpMetricsCollectionService();

    // Tx Manager to support working with datasets
    Configuration txConf = HBaseConfiguration.create();
    CConfigurationUtil.copyTxProperties(cConf, txConf);
    txManager = new TransactionManager(txConf);
    txManager.startAndWait();
    InMemoryTxSystemClient txSystemClient = new InMemoryTxSystemClient(txManager);
    TransactionSystemClientService txSystemClientService = new DelegatingTransactionSystemClientService(
            txSystemClient);

    final Injector injector = Guice.createInjector(new ConfigModule(cConf),
            new LocationRuntimeModule().getInMemoryModules(),
            new SystemDatasetRuntimeModule().getInMemoryModules(), new TransactionInMemoryModule());

    DatasetDefinitionRegistryFactory registryFactory = new DatasetDefinitionRegistryFactory() {
        @Override
        public DatasetDefinitionRegistry create() {
            DefaultDatasetDefinitionRegistry registry = new DefaultDatasetDefinitionRegistry();
            injector.injectMembers(registry);
            return registry;
        }
    };

    locationFactory = injector.getInstance(LocationFactory.class);
    NamespacedLocationFactory namespacedLocationFactory = injector.getInstance(NamespacedLocationFactory.class);
    dsFramework = new RemoteDatasetFramework(cConf, discoveryService, registryFactory);
    SystemDatasetInstantiatorFactory datasetInstantiatorFactory = new SystemDatasetInstantiatorFactory(
            locationFactory, dsFramework, cConf);

    DatasetAdminService datasetAdminService = new DatasetAdminService(dsFramework, cConf, locationFactory,
            datasetInstantiatorFactory, new NoOpMetadataStore());
    ImmutableSet<HttpHandler> handlers = ImmutableSet
            .<HttpHandler>of(new DatasetAdminOpHTTPHandler(datasetAdminService));
    opExecutorService = new DatasetOpExecutorService(cConf, discoveryService, metricsCollectionService,
            handlers);

    opExecutorService.startAndWait();

    ImmutableMap<String, DatasetModule> modules = ImmutableMap.<String, DatasetModule>builder()
            .putAll(injector.getInstance(Key.get(new TypeLiteral<Map<String, DatasetModule>>() {
            }, Names.named("defaultDatasetModules")))).putAll(DatasetMetaTableUtil.getModules()).build();

    TransactionExecutorFactory txExecutorFactory = injector.getInstance(TransactionExecutorFactory.class);

    inMemoryDatasetFramework = new InMemoryDatasetFramework(registryFactory, modules, cConf);
    MDSDatasetsRegistry mdsDatasetsRegistry = new MDSDatasetsRegistry(txSystemClientService,
            inMemoryDatasetFramework);

    ExploreFacade exploreFacade = new ExploreFacade(new DiscoveryExploreClient(cConf, discoveryService), cConf);
    namespaceStore = new InMemoryNamespaceStore();
    namespaceStore.create(NamespaceMeta.DEFAULT);
    DatasetInstanceService instanceService = new DatasetInstanceService(
            new DatasetTypeManager(cConf, mdsDatasetsRegistry, locationFactory,
                    // we don't need any default modules in this test
                    Collections.<String, DatasetModule>emptyMap()),
            new DatasetInstanceManager(mdsDatasetsRegistry), new InMemoryDatasetOpExecutor(dsFramework),
            exploreFacade, cConf, txExecutorFactory, registryFactory, namespaceStore);

    service = new DatasetService(cConf, namespacedLocationFactory, discoveryService, discoveryService,
            new DatasetTypeManager(cConf, mdsDatasetsRegistry, locationFactory,
                    // we don't need any default modules in this test
                    Collections.<String, DatasetModule>emptyMap()),
            metricsCollectionService, new InMemoryDatasetOpExecutor(dsFramework), mdsDatasetsRegistry,
            new HashSet<DatasetMetricsReporter>(), instanceService,
            new LocalStorageProviderNamespaceAdmin(cConf, namespacedLocationFactory, exploreFacade),
            namespaceStore);

    // Start dataset service, wait for it to be discoverable
    service.start();
    final CountDownLatch startLatch = new CountDownLatch(1);
    discoveryService.discover(Constants.Service.DATASET_MANAGER)
            .watchChanges(new ServiceDiscovered.ChangeListener() {
                @Override
                public void onChange(ServiceDiscovered serviceDiscovered) {
                    if (!Iterables.isEmpty(serviceDiscovered)) {
                        startLatch.countDown();
                    }
                }
            }, Threads.SAME_THREAD_EXECUTOR);

    startLatch.await(5, TimeUnit.SECONDS);
    // this usually happens while creating a namespace, however not doing that in data fabric tests
    Locations.mkdirsIfNotExists(namespacedLocationFactory.get(Id.Namespace.DEFAULT));
}