Example usage for java.util.concurrent CountDownLatch await

List of usage examples for java.util.concurrent CountDownLatch await

Introduction

This page collects example usages of java.util.concurrent.CountDownLatch.await(long, TimeUnit) from open-source projects.

Prototype

public boolean await(long timeout, TimeUnit unit) throws InterruptedException 

Document

Causes the current thread to wait until the latch has counted down to zero, unless the thread is interrupted or the specified waiting time elapses.
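
The timed await returns true if the count reached zero and false if the waiting time elapsed before it did. A minimal, self-contained sketch of the basic pattern (the class name and timeout values are illustrative):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

public class AwaitTimeoutExample {
    public static void main(String[] args) throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);

        new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    Thread.sleep(500); // simulate some work
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
                latch.countDown(); // release any thread blocked in await
            }
        }).start();

        // Wait at most 2 seconds; the boolean result says whether the latch
        // actually reached zero or the timeout elapsed first.
        boolean completed = latch.await(2, TimeUnit.SECONDS);
        System.out.println(completed ? "work finished" : "timed out");
    }
}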

Usage

From source file:com.bj58.spat.gaea.server.util.async.AsyncWorker.java

private void execTimeoutTask() {
    try {
        final AsyncTask task = taskQueue.take();
        if (task != null) {
            if ((System.currentTimeMillis() - task.getAddTime()) > task.getQtimeout()) {
                task.getHandler().exceptionCaught(new TimeoutException("async task timeout!"));
                return;
            } else {
                final CountDownLatch cdl = new CountDownLatch(1);
                executor.execute(new Runnable() {
                    @Override
                    public void run() {
                        try {
                            Object obj = task.getHandler().run();
                            task.getHandler().messageReceived(obj);
                        } catch (Throwable ex) {
                            task.getHandler().exceptionCaught(ex);
                        } finally {
                            cdl.countDown();
                        }
                    }
                });
                cdl.await(getTimeout(task.getTimeout(), taskQueue.size()), TimeUnit.MILLISECONDS);
                if (cdl.getCount() > 0) {
                    task.getHandler().exceptionCaught(new TimeoutException("async task timeout!"));
                }
            }
        } else {
            logger.error("execTimeoutTask take task is null");
        }
    } catch (InterruptedException ie) {
        logger.error("");
    } catch (Throwable e) {
        logger.error("get task from poll error", e);
    }
}
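
Note that the timed await itself returns whether the count reached zero, so the getCount() check in the example above can be written more directly. A minimal sketch of that variant, reusing the names from the method above:

boolean finished = cdl.await(getTimeout(task.getTimeout(), taskQueue.size()), TimeUnit.MILLISECONDS);
if (!finished) {
    task.getHandler().exceptionCaught(new TimeoutException("async task timeout!"));
}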

From source file:org.wisdom.framework.vertx.FileUploadTest.java

@Test
public void testFileUploadOfSmallFiles() throws InterruptedException, IOException {
    // Prepare the configuration
    ApplicationConfiguration configuration = mock(ApplicationConfiguration.class);
    when(configuration.getIntegerWithDefault(eq("vertx.http.port"), anyInt())).thenReturn(0);
    when(configuration.getIntegerWithDefault(eq("vertx.https.port"), anyInt())).thenReturn(-1);
    when(configuration.getIntegerWithDefault("vertx.acceptBacklog", -1)).thenReturn(-1);
    when(configuration.getIntegerWithDefault("vertx.receiveBufferSize", -1)).thenReturn(-1);
    when(configuration.getIntegerWithDefault("vertx.sendBufferSize", -1)).thenReturn(-1);
    when(configuration.getIntegerWithDefault("request.body.max.size", 100 * 1024)).thenReturn(100 * 1024);
    when(configuration.getLongWithDefault("http.upload.disk.threshold", DiskFileUpload.MINSIZE))
            .thenReturn(DiskFileUpload.MINSIZE);
    when(configuration.getLongWithDefault("http.upload.max", -1l)).thenReturn(-1l);
    when(configuration.getStringArray("wisdom.websocket.subprotocols")).thenReturn(new String[0]);
    when(configuration.getStringArray("vertx.websocket-subprotocols")).thenReturn(new String[0]);

    // Prepare the router with a controller
    Controller controller = new DefaultController() {
        @SuppressWarnings("unused")
        public Result index() throws IOException {
            FileItem item = context().file("upload");
            if (!item.isInMemory()) {
                return badRequest("In memory expected");
            }
            if (!item.name().equals("my-file.dat")) {
                return badRequest("broken name");
            }
            if (item.size() != 2048) {
                return badRequest("broken file");
            }

            if (!context().form().get("comment").get(0).equals("my description")) {
                return badRequest("broken form");
            }

            final File file = item.toFile();
            if (!file.exists() && file.length() != 2048) {
                return badRequest("broken in memory to file handling");
            }

            return ok(item.stream()).as(MimeTypes.BINARY);
        }
    };
    Router router = mock(Router.class);
    Route route = new RouteBuilder().route(HttpMethod.POST).on("/").to(controller, "index");
    when(router.getRouteFor(anyString(), anyString(), any(Request.class))).thenReturn(route);

    ContentEngine contentEngine = getMockContentEngine();

    // Configure the server.
    server = new WisdomVertxServer();
    server.accessor = new ServiceAccessor(null, configuration, router, contentEngine, executor, null,
            Collections.<ExceptionMapper>emptyList());
    server.configuration = configuration;
    server.vertx = vertx;
    server.start();

    VertxHttpServerTest.waitForStart(server);

    // Now start bunch of clients
    CountDownLatch startSignal = new CountDownLatch(1);
    CountDownLatch doneSignal = new CountDownLatch(NUMBER_OF_CLIENTS);

    int port = server.httpPort();

    for (int i = 1; i < NUMBER_OF_CLIENTS + 1; ++i) {
        // create and start threads
        clients.execute(new Client(startSignal, doneSignal, port, i, 2048));
    }

    startSignal.countDown(); // let all threads proceed
    if (!doneSignal.await(60, TimeUnit.SECONDS)) { // wait for all to finish
        Assert.fail("testFileUploadOfSmallFiles - Client not served in time");
    }

    assertThat(failure).isEmpty();
    assertThat(success).hasSize(NUMBER_OF_CLIENTS);
}
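
The Client class used above is not shown on this page; in this start-signal/done-signal pattern a worker typically blocks on the start latch, performs its request, and counts down the done latch when it finishes. A hypothetical sketch of such a worker (names and request logic are illustrative, not the actual test code):

class UploadClient implements Runnable {
    private final CountDownLatch startSignal;
    private final CountDownLatch doneSignal;

    UploadClient(CountDownLatch startSignal, CountDownLatch doneSignal) {
        this.startSignal = startSignal;
        this.doneSignal = doneSignal;
    }

    @Override
    public void run() {
        try {
            startSignal.await();        // block until the test releases all clients at once
            // ... perform the upload request and record success or failure ...
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        } finally {
            doneSignal.countDown();     // always signal completion so doneSignal.await() cannot hang
        }
    }
}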

From source file:com.couchbase.lite.syncgateway.GzippedAttachmentTest.java

/**
 * https://github.com/couchbase/couchbase-lite-android/issues/197
 * Gzipped attachment support with Replicator does not seem to be working
 * <p/>
 * https://github.com/couchbase/couchbase-lite-android/blob/master/src/androidTest/java/com/couchbase/lite/replicator/ReplicationTest.java#L2071
 */
public void testGzippedAttachment() throws Exception {
    if (!syncgatewayTestsEnabled()) {
        return;
    }

    Database pushDB = manager.getDatabase("pushdb");
    Database pullDB = manager.getDatabase("pulldb");

    String attachmentName = "attachment.png";

    // 1. store attachment with doc
    // 1.a load attachment data from asset
    InputStream attachmentStream = getAsset(attachmentName);
    java.io.ByteArrayOutputStream baos = new java.io.ByteArrayOutputStream();
    IOUtils.copy(attachmentStream, baos);
    baos.close();
    attachmentStream.close();
    byte[] bytes = baos.toByteArray();

    // 1.b apply GZIP + Base64
    String attachmentBase64 = Base64.encodeBytes(bytes, Base64.GZIP);

    // 1.c attachment Map object
    Map<String, Object> attachmentMap = new HashMap<String, Object>();
    attachmentMap.put("content_type", "image/png");
    attachmentMap.put("data", attachmentBase64);
    attachmentMap.put("encoding", "gzip");
    attachmentMap.put("length", bytes.length);

    // 1.d attachments Map object
    Map<String, Object> attachmentsMap = new HashMap<String, Object>();
    attachmentsMap.put(attachmentName, attachmentMap);

    // 1.e document property Map object
    Map<String, Object> propsMap = new HashMap<String, Object>();
    propsMap.put("_attachments", attachmentsMap);

    // 1.f store document into database
    Document putDoc = pushDB.createDocument();
    putDoc.putProperties(propsMap);
    String docId = putDoc.getId();

    URL remote = getReplicationURL();

    // push
    final CountDownLatch latch1 = new CountDownLatch(1);
    Replication pusher = pushDB.createPushReplication(remote);
    pusher.addChangeListener(new Replication.ChangeListener() {
        @Override
        public void changed(Replication.ChangeEvent event) {
            Log.e(TAG, "push 1:" + event.toString());
            if (event.getCompletedChangeCount() > 0) {
                latch1.countDown();
            }
        }
    });
    runReplication(pusher);
    assertTrue(latch1.await(30, TimeUnit.SECONDS));

    // pull
    Replication puller = pullDB.createPullReplication(remote);
    final CountDownLatch latch2 = new CountDownLatch(1);
    puller.addChangeListener(new Replication.ChangeListener() {
        @Override
        public void changed(Replication.ChangeEvent event) {
            Log.e(TAG, "pull 1:" + event.toString());
            if (event.getCompletedChangeCount() > 0) {
                latch2.countDown();
            }
        }
    });
    runReplication(puller);
    assertTrue(latch2.await(30, TimeUnit.SECONDS));

    Log.e(TAG, "Fetching doc1 via id: " + docId);
    Document pullDoc = pullDB.getDocument(docId);
    assertNotNull(pullDoc);
    assertTrue(pullDoc.getCurrentRevisionId().startsWith("1-"));
    Attachment attachment = pullDoc.getCurrentRevision().getAttachment(attachmentName);

    assertEquals(bytes.length, attachment.getLength());
    assertEquals("image/png", attachment.getContentType());
    assertEquals("gzip", attachment.getMetadata().get("encoding"));

    InputStream is = attachment.getContent();
    byte[] receivedBytes = getBytesFromInputStream(is);
    assertEquals(bytes.length, receivedBytes.length);
    is.close();

    assertTrue(Arrays.equals(bytes, receivedBytes));

    pushDB.close();
    pullDB.close();

    pushDB.delete();
    pullDB.delete();
}

From source file:com.vmware.photon.controller.api.client.resource.DisksApiTest.java

@Test
public void testGetTasksForDisksAsync() throws IOException, InterruptedException {
    Task task1 = new Task();
    task1.setId("task1");

    Task task2 = new Task();
    task2.setId("task2");

    final ResourceList<Task> taskResourceList = new ResourceList<>(Arrays.asList(task1, task2));

    ObjectMapper mapper = new ObjectMapper();
    String serializedTask = mapper.writeValueAsString(taskResourceList);

    setupMocks(serializedTask, HttpStatus.SC_OK);

    DisksApi disksApi = new DisksApi(restClient);
    final CountDownLatch latch = new CountDownLatch(1);

    disksApi.getTasksForDiskAsync("persistentDisk", new FutureCallback<ResourceList<Task>>() {
        @Override
        public void onSuccess(@Nullable ResourceList<Task> result) {
            assertEquals(result.getItems(), taskResourceList.getItems());
            latch.countDown();
        }

        @Override
        public void onFailure(Throwable t) {
            fail(t.toString());
            latch.countDown();
        }
    });

    assertThat(latch.await(COUNTDOWNLATCH_AWAIT_TIMEOUT, TimeUnit.SECONDS), is(true));
}

From source file:com.vmware.photon.controller.api.client.resource.DisksRestApiTest.java

@Test
public void testGetTasksForDisksAsync() throws IOException, InterruptedException {
    Task task1 = new Task();
    task1.setId("task1");

    Task task2 = new Task();
    task2.setId("task2");

    final ResourceList<Task> taskResourceList = new ResourceList<>(Arrays.asList(task1, task2));

    ObjectMapper mapper = new ObjectMapper();
    String serializedTask = mapper.writeValueAsString(taskResourceList);

    setupMocks(serializedTask, HttpStatus.SC_OK);

    DisksApi disksApi = new DisksRestApi(restClient);
    final CountDownLatch latch = new CountDownLatch(1);

    disksApi.getTasksForDiskAsync("persistentDisk", new FutureCallback<ResourceList<Task>>() {
        @Override
        public void onSuccess(@Nullable ResourceList<Task> result) {
            assertEquals(result.getItems(), taskResourceList.getItems());
            latch.countDown();
        }

        @Override
        public void onFailure(Throwable t) {
            fail(t.toString());
            latch.countDown();
        }
    });

    assertThat(latch.await(COUNTDOWNLATCH_AWAIT_TIMEOUT, TimeUnit.SECONDS), is(true));
}

From source file:nl.edia.xapi.proxy.StatementMethodInterceptorTest.java

@Test
public void test1a() throws Throwable {
    MethodInvocation mock = Mockito.mock(MethodInvocation.class);
    when(mock.getMethod()).thenReturn(getMethod("doSomeThing1a"));
    when(mock.getArguments()).thenReturn(
            new Object[] { Mockito.mock(User.class), Mockito.mock(Course.class), Mockito.mock(Module.class) });
    assertTrue(isaSync());
    {
        final CountDownLatch latch = new CountDownLatch(1);
        when(statementClient.postStatement(Mockito.any(gov.adlnet.xapi.model.Statement.class)))
                .then(new Answer<String>() {

                    @Override
                    public String answer(InvocationOnMock invocation) throws Throwable {
                        latch.countDown();
                        return "OK";
                    }
                });
        invoke(mock);
        assertTrue(latch.await(10, TimeUnit.SECONDS));
        verify(this.statementClientFactory, Mockito.times(1)).build(Mockito.eq(mock), Mockito.any());
        verify(statementClient, Mockito.times(1))
                .postStatement(Mockito.any(gov.adlnet.xapi.model.Statement.class));
    }

    // Without sync
    setaSync(false);
    assertFalse(isaSync());
    invoke(mock);
    verify(this.statementClientFactory, Mockito.times(2)).build(Mockito.eq(mock), Mockito.any());
    verify(statementClient, Mockito.times(2)).postStatement(Mockito.any(gov.adlnet.xapi.model.Statement.class));

    when(statementClient.postStatement(Mockito.any(gov.adlnet.xapi.model.Statement.class)))
            .thenThrow(IOException.class);
    // Try an exception async false
    setaSync(false);
    assertFalse(isaSync());
    invoke(mock);
    verify(this.statementClientFactory, Mockito.times(3)).build(Mockito.eq(mock), Mockito.any());
    verify(statementClient, Mockito.times(3)).postStatement(Mockito.any(gov.adlnet.xapi.model.Statement.class));
    {
        // Try an exception async true
        setaSync(true);
        assertTrue(isaSync());
        final CountDownLatch latch = new CountDownLatch(1);
        Mockito.reset(statementClient);
        when(statementClient.postStatement(Mockito.any(gov.adlnet.xapi.model.Statement.class)))
                .then(new Answer<String>() {

                    @Override
                    public String answer(InvocationOnMock invocation) throws Throwable {
                        latch.countDown();
                        return "OK";
                    }
                });
        invoke(mock);
        assertTrue(latch.await(10, TimeUnit.SECONDS));
        verify(this.statementClientFactory, Mockito.times(4)).build(Mockito.eq(mock), Mockito.any());
        verify(statementClient, Mockito.times(1))
                .postStatement(Mockito.any(gov.adlnet.xapi.model.Statement.class));
    }

}

From source file:io.openvidu.server.recording.service.SingleStreamRecordingService.java

public Recording stopRecording(Session session, Recording recording, EndReason reason,
        boolean forceAfterKmsRestart) {
    log.info("Stopping individual ({}) recording {} of session {}. Reason: {}",
            recording.hasVideo() ? (recording.hasAudio() ? "video+audio" : "video-only") : "audioOnly",
            recording.getId(), recording.getSessionId(), reason);

    final int numberOfActiveRecorders = recorders.get(recording.getSessionId()).size();
    final CountDownLatch stoppedCountDown = new CountDownLatch(numberOfActiveRecorders);

    for (RecorderEndpointWrapper wrapper : recorders.get(recording.getSessionId()).values()) {
        this.stopRecorderEndpointOfPublisherEndpoint(recording.getSessionId(), wrapper.getStreamId(),
                stoppedCountDown, forceAfterKmsRestart);
    }
    try {
        if (!stoppedCountDown.await(5, TimeUnit.SECONDS)) {
            recording.setStatus(io.openvidu.java.client.Recording.Status.failed);
            log.error("Error waiting for some recorder endpoint to stop in session {}",
                    recording.getSessionId());
        }
    } catch (InterruptedException e) {
        recording.setStatus(io.openvidu.java.client.Recording.Status.failed);
        log.error("Exception while waiting for state change", e);
    }

    this.cleanRecordingMaps(recording);
    this.recorders.remove(recording.getSessionId());

    recording = this.sealMetadataFiles(recording);

    if (reason != null && session != null) {
        this.recordingManager.sessionHandler.sendRecordingStoppedNotification(session, recording, reason);
    }

    return recording;
}
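
This example shows the common pattern of sizing the latch to the number of parties that must report back, then bounding the wait. A minimal sketch of the same idea with hypothetical names (the recorder and callback types below are illustrative, not the OpenVidu API):

int parties = activeRecorders.size();                  // hypothetical collection of recorders
final CountDownLatch allStopped = new CountDownLatch(parties);
for (RecorderHandle recorder : activeRecorders) {      // hypothetical handle type
    recorder.stopAsync(new Runnable() {
        @Override
        public void run() {
            allStopped.countDown();                    // invoked once this recorder has stopped
        }
    });
}
if (!allStopped.await(5, TimeUnit.SECONDS)) {
    // at least one recorder did not report back in time; treat the recording as failed
}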

From source file:com.vmware.photon.controller.api.client.resource.ClusterApiTest.java

@Test
public void testGetVmsAsync() throws IOException, InterruptedException {
    Vm vm1 = new Vm();
    vm1.setId("vm1");

    Vm vm2 = new Vm();
    vm2.setId("vm2");

    final ResourceList<Vm> vmList = new ResourceList<>(Arrays.asList(vm1, vm2));

    ObjectMapper mapper = new ObjectMapper();
    String serializedTask = mapper.writeValueAsString(vmList);

    setupMocks(serializedTask, HttpStatus.SC_OK);

    ClusterApi clusterApi = new ClusterApi(restClient);
    final CountDownLatch latch = new CountDownLatch(1);

    clusterApi.getVmsInClusterAsync("foo", new FutureCallback<ResourceList<Vm>>() {
        @Override
        public void onSuccess(ResourceList<Vm> result) {
            assertEquals(result.getItems(), vmList.getItems());
            latch.countDown();
        }

        @Override
        public void onFailure(Throwable t) {
            fail(t.toString());
            latch.countDown();
        }
    });

    assertThat(latch.await(COUNTDOWNLATCH_AWAIT_TIMEOUT, TimeUnit.SECONDS), is(true));
}

From source file:com.vmware.photon.controller.api.client.resource.ClusterRestApiTest.java

@Test
public void testGetVmsAsync() throws IOException, InterruptedException {
    Vm vm1 = new Vm();
    vm1.setId("vm1");

    Vm vm2 = new Vm();
    vm2.setId("vm2");

    final ResourceList<Vm> vmList = new ResourceList<>(Arrays.asList(vm1, vm2));

    ObjectMapper mapper = new ObjectMapper();
    String serializedTask = mapper.writeValueAsString(vmList);

    setupMocks(serializedTask, HttpStatus.SC_OK);

    ClusterApi clusterApi = new ClusterRestApi(restClient);
    final CountDownLatch latch = new CountDownLatch(1);

    clusterApi.getVmsInClusterAsync("foo", new FutureCallback<ResourceList<Vm>>() {
        @Override
        public void onSuccess(ResourceList<Vm> result) {
            assertEquals(result.getItems(), vmList.getItems());
            latch.countDown();
        }

        @Override
        public void onFailure(Throwable t) {
            fail(t.toString());
            latch.countDown();
        }
    });

    assertThat(latch.await(COUNTDOWNLATCH_AWAIT_TIMEOUT, TimeUnit.SECONDS), is(true));
}

From source file:com.vmware.photon.controller.nsxclient.apis.DhcpServiceApiTest.java

@Test
public void testCreateDhcpRelayProfile() throws IOException, InterruptedException {
    final DhcpRelayProfile mockResponse = new DhcpRelayProfile();
    mockResponse.setId("id");
    mockResponse.setResourceType(ServiceProfileResourceType.DHCP_RELAY_PROFILE);
    setupMocks(objectMapper.writeValueAsString(mockResponse), HttpStatus.SC_CREATED);

    DhcpServiceApi client = new DhcpServiceApi(restClient);
    final CountDownLatch latch = new CountDownLatch(1);
    client.createDhcpRelayProfile(new DhcpRelayProfileCreateSpec(),
            new com.google.common.util.concurrent.FutureCallback<DhcpRelayProfile>() {
                @Override
                public void onSuccess(DhcpRelayProfile result) {
                    assertEquals(result, mockResponse);
                    latch.countDown();
                }

                @Override
                public void onFailure(Throwable t) {
                    fail(t.toString());
                    latch.countDown();
                }
            });

    assertThat(latch.await(COUNTDOWNLATCH_AWAIT_TIMEOUT, TimeUnit.SECONDS), is(true));
}