List of usage examples for java.util.concurrent.CountDownLatch.countDown()
public void countDown()
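Before the project examples below, here is a minimal, self-contained sketch (not taken from any of the listed source files; the class name CountDownExample is illustrative) showing the typical pairing of countDown() in a worker thread with await() in the calling thread:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

public class CountDownExample {
    public static void main(String[] args) throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);

        new Thread(() -> {
            try {
                // simulate some work
                TimeUnit.MILLISECONDS.sleep(200);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            } finally {
                // decrement the latch; releases any thread blocked in await()
                latch.countDown();
            }
        }).start();

        // block (with a timeout) until the worker has counted the latch down
        if (latch.await(5, TimeUnit.SECONDS)) {
            System.out.println("worker finished");
        } else {
            System.out.println("timed out waiting for worker");
        }
    }
}

Each countDown() call decrements the latch's count; once the count reaches zero, all threads blocked in await() are released, and any further countDown() calls have no effect. The examples below use the same pattern to signal completion from callbacks, listeners, and worker tasks.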
From source file:net.minecraftforge.fml.common.FMLCommonHandler.java
public void handleServerStopped() {
    sidedDelegate.serverStopped();
    MinecraftServer server = getMinecraftServerInstance();
    Loader.instance().serverStopped();
    // FORCE the internal server to stop: hello optifine workaround!
    if (server != null)
        ObfuscationReflectionHelper.setPrivateValue(MinecraftServer.class, server, false,
                "field_71316" + "_v", "u", "serverStopped");
    // allow any pending exit to continue, clear exitLatch
    CountDownLatch latch = exitLatch;
    if (latch != null) {
        latch.countDown();
        exitLatch = null;
    }
}
From source file:org.eclipse.hono.service.AbstractApplication.java
/**
 * Stops this application in a controlled fashion.
 *
 * @param maxWaitTime The maximum time to wait for the server to shut down (in seconds).
 * @param shutdownHandler The handler to invoke with the result of the shutdown attempt.
 */
public final void shutdown(final long maxWaitTime, final Handler<Boolean> shutdownHandler) {
    try {
        log.info("shutting down application...");
        preShutdown();
        final CountDownLatch latch = new CountDownLatch(1);
        stopHealthCheckServer().setHandler(result -> {
            if (vertx != null) {
                log.info("closing vert.x instance ...");
                vertx.close(r -> {
                    if (r.failed()) {
                        log.error("could not close vert.x instance", r.cause());
                    }
                    latch.countDown();
                });
            } else {
                latch.countDown(); // nothing to wait for
            }
        });
        if (latch.await(maxWaitTime, TimeUnit.SECONDS)) {
            log.info("application has been shut down successfully");
            shutdownHandler.handle(Boolean.TRUE);
        } else {
            log.error("shut down timed out, aborting...");
            shutdownHandler.handle(Boolean.FALSE);
        }
    } catch (InterruptedException e) {
        log.error("application shut down has been interrupted, aborting...");
        Thread.currentThread().interrupt();
        shutdownHandler.handle(Boolean.FALSE);
    }
}
From source file:edu.cornell.med.icb.R.TestRConnectionPool.java
/**
 * Checks that two threads actually get the same connection pool.
 * @throws InterruptedException if the threads are interrupted during the test
 */
@Test
public void validateSingleton() throws InterruptedException {
    final RConnectionPool[] pools = new RConnectionPool[2];
    final CountDownLatch latch = new CountDownLatch(2);
    final ExecutorService threadPool = Executors.newCachedThreadPool();
    try {
        threadPool.submit(new Callable<Boolean>() {
            public Boolean call() {
                pools[0] = RConnectionPool.getInstance();
                latch.countDown();
                return true;
            }
        });
        threadPool.submit(new Callable<Boolean>() {
            public Boolean call() {
                pools[1] = RConnectionPool.getInstance();
                latch.countDown();
                return true;
            }
        });
        latch.await();
        assertNotNull("Connection pool should never be null", pools[0]);
        assertNotNull("Connection pool should never be null", pools[1]);
        assertEquals("Pools should be the same", pools[0], pools[1]);
    } finally {
        threadPool.shutdown();
        if (pools[0] != null) {
            pools[0].close();
        }
        if (pools[1] != null) {
            pools[1].close();
        }
    }
}
From source file:com.vmware.photon.controller.api.client.resource.FlavorApiTest.java
@Test
public void testListAllAsync() throws Exception {
    Flavor flavor1 = new Flavor();
    flavor1.setId("flavor1");
    flavor1.setKind("vm");

    Flavor flavor2 = new Flavor();
    flavor2.setId("flavor2");
    flavor2.setKind("vm");

    final ResourceList<Flavor> flavorResourceList = new ResourceList<>(Arrays.asList(flavor1, flavor2));

    ObjectMapper mapper = new ObjectMapper();
    String serializedResponse = mapper.writeValueAsString(flavorResourceList);

    setupMocks(serializedResponse, HttpStatus.SC_OK);

    FlavorApi flavorApi = new FlavorApi(restClient);
    final CountDownLatch latch = new CountDownLatch(1);

    flavorApi.listAllAsync(new FutureCallback<ResourceList<Flavor>>() {
        @Override
        public void onSuccess(@Nullable ResourceList<Flavor> result) {
            assertEquals(result.getItems(), flavorResourceList.getItems());
            latch.countDown();
        }

        @Override
        public void onFailure(Throwable t) {
            fail(t.toString());
            latch.countDown();
        }
    });

    assertThat(latch.await(COUNTDOWNLATCH_AWAIT_TIMEOUT, TimeUnit.SECONDS), is(true));
}
From source file:com.vmware.photon.controller.api.client.resource.FlavorRestApiTest.java
@Test
public void testListAllAsync() throws Exception {
    Flavor flavor1 = new Flavor();
    flavor1.setId("flavor1");
    flavor1.setKind("vm");

    Flavor flavor2 = new Flavor();
    flavor2.setId("flavor2");
    flavor2.setKind("vm");

    final ResourceList<Flavor> flavorResourceList = new ResourceList<>(Arrays.asList(flavor1, flavor2));

    ObjectMapper mapper = new ObjectMapper();
    String serializedResponse = mapper.writeValueAsString(flavorResourceList);

    setupMocks(serializedResponse, HttpStatus.SC_OK);

    FlavorApi flavorApi = new FlavorRestApi(restClient);
    final CountDownLatch latch = new CountDownLatch(1);

    flavorApi.listAllAsync(new FutureCallback<ResourceList<Flavor>>() {
        @Override
        public void onSuccess(@Nullable ResourceList<Flavor> result) {
            assertEquals(result.getItems(), flavorResourceList.getItems());
            latch.countDown();
        }

        @Override
        public void onFailure(Throwable t) {
            fail(t.toString());
            latch.countDown();
        }
    });

    assertThat(latch.await(COUNTDOWNLATCH_AWAIT_TIMEOUT, TimeUnit.SECONDS), is(true));
}
From source file:com.couchbase.lite.syncgateway.GzippedAttachmentTest.java
/**
 * https://github.com/couchbase/couchbase-lite-android/issues/197
 * Gzipped attachment support with Replicator does not seem to be working
 * <p/>
 * https://github.com/couchbase/couchbase-lite-android/blob/master/src/androidTest/java/com/couchbase/lite/replicator/ReplicationTest.java#L2071
 */
public void testGzippedAttachment() throws Exception {
    if (!syncgatewayTestsEnabled()) {
        return;
    }

    Database pushDB = manager.getDatabase("pushdb");
    Database pullDB = manager.getDatabase("pulldb");

    String attachmentName = "attachment.png";

    // 1. store attachment with doc
    // 1.a load attachment data from asset
    InputStream attachmentStream = getAsset(attachmentName);
    java.io.ByteArrayOutputStream baos = new java.io.ByteArrayOutputStream();
    IOUtils.copy(attachmentStream, baos);
    baos.close();
    attachmentStream.close();
    byte[] bytes = baos.toByteArray();
    // 1.b apply GZIP + Base64
    String attachmentBase64 = Base64.encodeBytes(bytes, Base64.GZIP);
    // 1.c attachment Map object
    Map<String, Object> attachmentMap = new HashMap<String, Object>();
    attachmentMap.put("content_type", "image/png");
    attachmentMap.put("data", attachmentBase64);
    attachmentMap.put("encoding", "gzip");
    attachmentMap.put("length", bytes.length);
    // 1.d attachments Map object
    Map<String, Object> attachmentsMap = new HashMap<String, Object>();
    attachmentsMap.put(attachmentName, attachmentMap);
    // 1.e document property Map object
    Map<String, Object> propsMap = new HashMap<String, Object>();
    propsMap.put("_attachments", attachmentsMap);
    // 1.f store document into database
    Document putDoc = pushDB.createDocument();
    putDoc.putProperties(propsMap);
    String docId = putDoc.getId();

    URL remote = getReplicationURL();

    // push
    final CountDownLatch latch1 = new CountDownLatch(1);
    Replication pusher = pushDB.createPushReplication(remote);
    pusher.addChangeListener(new Replication.ChangeListener() {
        @Override
        public void changed(Replication.ChangeEvent event) {
            Log.e(TAG, "push 1:" + event.toString());
            if (event.getCompletedChangeCount() > 0) {
                latch1.countDown();
            }
        }
    });
    runReplication(pusher);
    assertTrue(latch1.await(30, TimeUnit.SECONDS));

    // pull
    Replication puller = pullDB.createPullReplication(remote);
    final CountDownLatch latch2 = new CountDownLatch(1);
    puller.addChangeListener(new Replication.ChangeListener() {
        @Override
        public void changed(Replication.ChangeEvent event) {
            Log.e(TAG, "pull 1:" + event.toString());
            if (event.getCompletedChangeCount() > 0) {
                latch2.countDown();
            }
        }
    });
    runReplication(puller);
    assertTrue(latch2.await(30, TimeUnit.SECONDS));

    Log.e(TAG, "Fetching doc1 via id: " + docId);
    Document pullDoc = pullDB.getDocument(docId);
    assertNotNull(pullDoc);
    assertTrue(pullDoc.getCurrentRevisionId().startsWith("1-"));

    Attachment attachment = pullDoc.getCurrentRevision().getAttachment(attachmentName);
    assertEquals(bytes.length, attachment.getLength());
    assertEquals("image/png", attachment.getContentType());
    assertEquals("gzip", attachment.getMetadata().get("encoding"));
    InputStream is = attachment.getContent();
    byte[] receivedBytes = getBytesFromInputStream(is);
    assertEquals(bytes.length, receivedBytes.length);
    is.close();
    assertTrue(Arrays.equals(bytes, receivedBytes));

    pushDB.close();
    pullDB.close();
    pushDB.delete();
    pullDB.delete();
}
From source file:oz.hadoop.yarn.api.core.AbstractApplicationMasterLauncher.java
/**
 * @param finite
 * @return
 */
@SuppressWarnings("unchecked")
private T buildLaunchResult(boolean finite) {
    T returnValue = null;
    if (!finite) {
        DataProcessorImpl dp = new DataProcessorImpl(this.clientServer);
        return (T) dp;
    } else {
        final ContainerDelegate[] containerDelegates = clientServer.getContainerDelegates();
        if (containerDelegates.length == 0) {
            logger.warn("ClientServer returned 0 ContainerDelegates");
        } else {
            if (logger.isDebugEnabled()) {
                logger.debug("Sending start to Application Containers");
            }
            final CountDownLatch completionLatch = new CountDownLatch(containerDelegates.length);
            for (ContainerDelegate containerDelegate : containerDelegates) {
                containerDelegate.process(ByteBuffer.wrap("START".getBytes()), new ReplyPostProcessor() {
                    @Override
                    public void doProcess(ByteBuffer reply) {
                        completionLatch.countDown();
                    }
                });
            }
        }
        // if (this.clientServer.isRunning()){
        //     /*
        //      * By initiating a graceful shutdown we simply sending a signal
        //      * for an application to stop once complete.
        //      */
        //     this.executor.execute(new Runnable() {
        //         @Override
        //         public void run() {
        //             clientServer.stop(false);
        //         }
        //     });
        // }
        returnValue = null;
    }
    return returnValue;
}
From source file:com.twitter.distributedlog.client.proxy.TestProxyClientManager.java
@Test(timeout = 60000)
public void testHandshake() throws Exception {
    final int numHosts = 3;
    final int numStreamsPerHost = 3;
    final int initialPort = 4000;

    MockProxyClientBuilder builder = new MockProxyClientBuilder();
    Map<SocketAddress, ServerInfo> serverInfoMap = new HashMap<SocketAddress, ServerInfo>();
    for (int i = 0; i < numHosts; i++) {
        SocketAddress address = createSocketAddress(initialPort + i);
        ServerInfo serverInfo = new ServerInfo();
        for (int j = 0; j < numStreamsPerHost; j++) {
            serverInfo.putToOwnerships(runtime.getMethodName() + "_stream_" + j, address.toString());
        }
        Pair<MockProxyClient, MockServerInfoService> mockProxyClient = createMockProxyClient(address, serverInfo);
        builder.provideProxyClient(address, mockProxyClient.getLeft());
        serverInfoMap.put(address, serverInfo);
    }

    final Map<SocketAddress, ServerInfo> results = new HashMap<SocketAddress, ServerInfo>();
    final CountDownLatch doneLatch = new CountDownLatch(2 * numHosts);
    ProxyListener listener = new ProxyListener() {
        @Override
        public void onHandshakeSuccess(SocketAddress address, ProxyClient client, ServerInfo serverInfo) {
            synchronized (results) {
                results.put(address, serverInfo);
            }
            doneLatch.countDown();
        }

        @Override
        public void onHandshakeFailure(SocketAddress address, ProxyClient client, Throwable cause) {
        }
    };

    TestHostProvider rs = new TestHostProvider();
    ProxyClientManager clientManager = createProxyClientManager(builder, rs, 0L);
    clientManager.registerProxyListener(listener);
    assertEquals("There should be no clients in the manager", 0, clientManager.getNumProxies());
    for (int i = 0; i < numHosts; i++) {
        rs.addHost(createSocketAddress(initialPort + i));
    }
    // handshake would handshake with 3 hosts again
    clientManager.handshake();
    doneLatch.await();
    assertEquals("Handshake should return server info", numHosts, results.size());
    assertTrue("Handshake should get all server infos", Maps.difference(serverInfoMap, results).areEqual());
}
From source file:io.syndesis.connector.sql.stored.SqlStoredStartConnectorComponentTest.java
@Test
public void camelConnectorTest() throws Exception {
    BasicDataSource ds = new BasicDataSource();
    ds.setUsername(properties.getProperty("sql-stored-start-connector.user"));
    ds.setPassword(properties.getProperty("sql-stored-start-connector.password"));
    ds.setUrl(properties.getProperty("sql-stored-start-connector.url"));

    SimpleRegistry registry = new SimpleRegistry();
    registry.put("dataSource", ds);

    CamelContext context = new DefaultCamelContext(registry);
    CountDownLatch latch = new CountDownLatch(1);
    final Result result = new Result();

    try {
        context.addRoutes(new RouteBuilder() {
            @Override
            public void configure() throws Exception {
                from("sql-stored-start-connector:DEMO_OUT( OUT INTEGER c)").process(new Processor() {
                    @Override
                    public void process(Exchange exchange) throws Exception {
                        String jsonBean = (String) exchange.getIn().getBody();
                        result.setResult(jsonBean);
                        latch.countDown();
                    }
                }).to("stream:out");
            }
        });
        context.start();
        latch.await(5l, TimeUnit.SECONDS);
        Assert.assertEquals("{\"c\":60}", result.getJsonBean());
    } finally {
        context.stop();
    }
}
From source file:com.examples.cloud.speech.StreamingRecognizeClient.java
/** Send streaming recognize requests to server. */
public void recognize() throws InterruptedException, IOException {
    final CountDownLatch finishLatch = new CountDownLatch(1);
    StreamObserver<StreamingRecognizeResponse> responseObserver = new StreamObserver<StreamingRecognizeResponse>() {
        @Override
        public void onNext(StreamingRecognizeResponse response) {
            logger.info("Received response: " + TextFormat.printToString(response));
        }

        @Override
        public void onError(Throwable error) {
            logger.log(Level.WARN, "recognize failed: {0}", error);
            finishLatch.countDown();
        }

        @Override
        public void onCompleted() {
            logger.info("recognize completed.");
            finishLatch.countDown();
        }
    };

    StreamObserver<StreamingRecognizeRequest> requestObserver = speechClient.streamingRecognize(responseObserver);
    try {
        // Build and send a StreamingRecognizeRequest containing the parameters for
        // processing the audio.
        RecognitionConfig config = RecognitionConfig.newBuilder().setEncoding(AudioEncoding.LINEAR16)
                .setSampleRate(samplingRate).build();
        StreamingRecognitionConfig streamingConfig = StreamingRecognitionConfig.newBuilder().setConfig(config)
                .setInterimResults(true).setSingleUtterance(true).build();

        StreamingRecognizeRequest initial = StreamingRecognizeRequest.newBuilder()
                .setStreamingConfig(streamingConfig).build();
        requestObserver.onNext(initial);

        // Open audio file. Read and send sequential buffers of audio as additional RecognizeRequests.
        FileInputStream in = new FileInputStream(new File(file));
        // For LINEAR16 at 16000 Hz sample rate, 3200 bytes corresponds to 100 milliseconds of audio.
        byte[] buffer = new byte[BYTES_PER_BUFFER];
        int bytesRead;
        int totalBytes = 0;
        int samplesPerBuffer = BYTES_PER_BUFFER / BYTES_PER_SAMPLE;
        int samplesPerMillis = samplingRate / 1000;
        while ((bytesRead = in.read(buffer)) != -1) {
            totalBytes += bytesRead;
            StreamingRecognizeRequest request = StreamingRecognizeRequest.newBuilder()
                    .setAudioContent(ByteString.copyFrom(buffer, 0, bytesRead)).build();
            requestObserver.onNext(request);
            // To simulate real-time audio, sleep after sending each audio buffer.
            Thread.sleep(samplesPerBuffer / samplesPerMillis);
        }
        logger.info("Sent " + totalBytes + " bytes from audio file: " + file);
    } catch (RuntimeException e) {
        // Cancel RPC.
        requestObserver.onError(e);
        throw e;
    }
    // Mark the end of requests.
    requestObserver.onCompleted();

    // Receiving happens asynchronously.
    finishLatch.await(1, TimeUnit.MINUTES);
}