List of usage examples for java.util.concurrent Semaphore tryAcquire
public boolean tryAcquire(long timeout, TimeUnit unit) throws InterruptedException
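Before the project examples, here is a minimal, self-contained sketch of the timed overload (the worker thread, the 500 ms "work", and the 2-second timeout are illustrative values, not taken from the examples below): the calling thread waits for a permit for at most the given timeout and gets false if none was released in time.

import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;

public class TryAcquireDemo {
    public static void main(String[] args) throws InterruptedException {
        // zero permits initially; the background task releases one when it is done
        final Semaphore done = new Semaphore(0);

        Thread worker = new Thread(() -> {
            try {
                Thread.sleep(500);      // simulate some work
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
            done.release();             // signal completion
        });
        worker.start();

        // wait up to 2 seconds for the worker's signal; returns false on timeout
        if (done.tryAcquire(2, TimeUnit.SECONDS)) {
            System.out.println("worker finished in time");
        } else {
            System.out.println("timed out waiting for worker");
        }
    }
}

The same wait-for-callback pattern, usually with a Semaphore created with zero permits, recurs throughout the examples that follow.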
From source file:org.jboss.pnc.jenkinsbuilddriver.test.JenkinsDriverRemoteTest.java
@Test
//@Ignore("To be fixed by NCL-554")
public void startJenkinsJobTestCase() throws Exception {
    BuildConfigurationAudited pbc = getBuildConfiguration();
    RunningEnvironment runningEnvironment = getRunningEnvironment();

    final Semaphore mutex = new Semaphore(1);
    ObjectWrapper<Boolean> completed = new ObjectWrapper<>(false);
    ObjectWrapper<BuildDriverResult> resultWrapper = new ObjectWrapper<>();
    ObjectWrapper<Long> buildStarted = new ObjectWrapper<>();
    ObjectWrapper<Long> buildTook = new ObjectWrapper<>();

    class BuildTask {
        CompletedBuild buildJobDetails;
    }

    final BuildTask buildTask = new BuildTask();

    Consumer<CompletedBuild> onComplete = (completedBuild) -> {
        buildTask.buildJobDetails = completedBuild;
        completed.set(true);
        buildTook.set(System.currentTimeMillis() - buildStarted.get());
        log.info("Received build completed in " + buildTook.get() + "ms.");

        try {
            resultWrapper.set(completedBuild.getBuildResult());
        } catch (BuildDriverException e) {
            throw new AssertionError("Cannot get build result.", e);
        }

        mutex.release();
    };

    Consumer<Throwable> onError = (e) -> {
        throw new AssertionError(e);
    };

    mutex.acquire();
    RunningBuild runningBuild = jenkinsBuildDriver.startProjectBuild(mock(BuildExecution.class), pbc,
            runningEnvironment);
    buildStarted.set(System.currentTimeMillis());
    runningBuild.monitor(onComplete, onError);
    mutex.tryAcquire(60, TimeUnit.SECONDS); // wait for callback to release

    Assert.assertTrue("There was no complete callback.", completed.get());
    Assert.assertNotNull(buildTask.buildJobDetails);

    long minBuildTime = 5000;
    Assert.assertTrue(
            "Received build completed in " + buildTook.get() + " while expected >" + minBuildTime + ".",
            buildTook.get() >= minBuildTime);

    BuildDriverResult buildDriverResult = resultWrapper.get();

    Assert.assertEquals(BuildDriverStatus.SUCCESS, buildDriverResult.getBuildDriverStatus());
    Assert.assertTrue("Incomplete build log.",
            buildDriverResult.getBuildLog().contains("Building in workspace"));
    Assert.assertTrue("Incomplete build log.",
            buildDriverResult.getBuildLog().contains("Finished: SUCCESS"));

    Assert.assertTrue("There was no complete callback.", completed.get());
}
From source file:org.wso2.carbon.cloud.gateway.transport.CGTransportSender.java
@Override
public void sendMessage(MessageContext msgContext, String targetEPR, OutTransportInfo outTransportInfo)
        throws AxisFault {
    try {
        String requestUri = (String) msgContext.getProperty(Constants.Configuration.TRANSPORT_IN_URL);
        if (requestUri == null) {
            handleException("The request URI is null");
        }
        String endpointPrefix = (String) msgContext.getProperty(NhttpConstants.ENDPOINT_PREFIX);
        if (endpointPrefix == null) {
            handleException("The ENDPOINT_PREFIX(EPR) is not found");
        }
        Object headers = msgContext.getProperty(org.apache.axis2.context.MessageContext.TRANSPORT_HEADERS);
        if (headers == null) {
            handleException("Transport headers are null");
        }
        String requestMsgIdMsgId = msgContext.getMessageID();
        if (requestMsgIdMsgId == null) {
            requestMsgIdMsgId = UUID.randomUUID().toString();
        }
        Message thriftMsg = new Message();
        if (msgContext.isDoingMTOM()) {
            thriftMsg.setIsDoingMTOM(msgContext.isDoingMTOM());
            msgContext.setProperty(org.apache.axis2.Constants.Configuration.ENABLE_MTOM,
                    org.apache.axis2.Constants.VALUE_TRUE);
        } else if (msgContext.isDoingSwA()) {
            thriftMsg.setIsDoingSwA(msgContext.isDoingSwA());
            msgContext.setProperty(org.apache.axis2.Constants.Configuration.ENABLE_SWA,
                    org.apache.axis2.Constants.VALUE_TRUE);
        } else if (msgContext.isDoingREST()) {
            thriftMsg.setIsDoingREST(msgContext.isDoingREST());
        }
        thriftMsg.setHttpMethod((String) msgContext.getProperty(Constants.Configuration.HTTP_METHOD));
        thriftMsg.setMessageId(requestMsgIdMsgId);
        thriftMsg.setEpoch(System.currentTimeMillis());
        // a class cast exception (if any) will be logged in case a mismatched type is returned;
        // we will not worry about the type because the correct type should be returned
        thriftMsg.setRequestURI(requestUri);
        thriftMsg.setSoapAction(msgContext.getSoapAction());

        OMOutputFormat format = BaseUtils.getOMOutputFormat(msgContext);
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        formatter.writeTo(msgContext, format, out, false);
        thriftMsg.setMessage(out.toByteArray());

        String contentType = formatter.getContentType(msgContext, format, msgContext.getSoapAction());
        thriftMsg.setContentType(contentType);
        if (((Map) headers).containsKey(HTTP.CONTENT_TYPE)) {
            ((Map) headers).put(HTTP.CONTENT_TYPE, contentType);
        }
        thriftMsg.setTransportHeaders((Map) headers);

        Semaphore available = null;
        // The csg polling transport on the other side will directly use the EPR as the key for
        // the message buffer. Although this introduces a tight coupling between the CGTransport
        // and CGPollingTransport, it is done this way to achieve maximum performance.
        String token = CGThriftServerHandler.getSecureUUID(endpointPrefix);
        if (token == null) {
            handleException("No permission to access the server buffers");
        }
        boolean isOutIn = waitForSynchronousResponse(msgContext);
        if (isOutIn) {
            available = new Semaphore(0, true);
            CGThriftServerHandler.getSemaphoreMap().put(requestMsgIdMsgId, available);
        }
        CGThriftServerHandler.addRequestMessage(thriftMsg, token);
        try {
            if (isOutIn) {
                // wait until the response is available; this thread is signalled by the
                // semaphore checking thread, or a timeout fault is sent if there is no response
                // within the configured semaphore timeout or if the semaphore is interrupted
                try {
                    available.tryAcquire(semaphoreTimeOut, TimeUnit.SECONDS);
                } catch (InterruptedException ignore) {
                }
                // make sure we don't run out of main memory
                CGThriftServerHandler.getSemaphoreMap().remove(requestMsgIdMsgId);
                Message msg = CGThriftServerHandler.getMiddleBuffer().remove(requestMsgIdMsgId);
                if (msg != null) {
                    handleSyncResponse(msgContext, msg, contentType);
                } else {
                    // no response has arrived yet, so send a fault to the client
                    log.warn("The semaphore with id '" + requestMsgIdMsgId + "' timed out while "
                            + "waiting for a response, sending a fault to client..");
                    sendFault(msgContext, new Exception("Timeout occurred while waiting for a response"));
                }
            }
        } catch (Exception e) {
            handleException("Could not process the response message", e);
        }
    } catch (Exception e) {
        handleException("Could not process the request message", e);
    }
}
From source file:org.jboss.pnc.environment.docker.DockerEnvironmentDriverRemoteTest.java
@Test
public void buildDestroyEnvironmentTest() throws EnvironmentDriverException, InterruptedException {
    final Semaphore mutex = new Semaphore(0);

    // Create container
    final DockerStartedEnvironment startedEnv = (DockerStartedEnvironment) dockerEnvDriver
            .buildEnvironment(BuildType.JAVA, DUMMY_REPOSITORY_CONFIGURATION);

    Consumer<RunningEnvironment> onComplete = (generalRunningEnv) -> {
        DockerRunningEnvironment runningEnv = (DockerRunningEnvironment) generalRunningEnv;
        boolean containerDestroyed = false;
        try {
            testRunningContainer(runningEnv, true, "Environment wasn't successfully built.");
            testRunningEnvContainer(runningEnv, true, "Environment wasn't set up correctly.");

            // Destroy container
            dockerEnvDriver.destroyEnvironment(runningEnv.getId());
            containerDestroyed = true;
            testRunningContainer(runningEnv, false, "Environment wasn't successfully destroyed.");
            mutex.release();
        } catch (Throwable e) {
            fail(e.getMessage());
        } finally {
            if (!containerDestroyed)
                destroyEnvironmentWithReport(runningEnv.getId());
        }
    };

    Consumer<Exception> onError = (e) -> {
        destroyEnvironmentWithReport(startedEnv.getId());
        fail("Failed to init docker container. " + e.getMessage());
    };

    startedEnv.monitorInitialization(onComplete, onError);
    mutex.tryAcquire(MAX_TEST_DURATION, TimeUnit.SECONDS);
}
From source file:com.parse.ParseUserTest.java
@Test
public void testLogInWithCallback() throws Exception {
    // Register a mock currentUserController to make setCurrentUser work
    ParseCurrentUserController currentUserController = mock(ParseCurrentUserController.class);
    when(currentUserController.setAsync(any(ParseUser.class))).thenReturn(Task.<Void>forResult(null));
    ParseCorePlugins.getInstance().registerCurrentUserController(currentUserController);

    // Register a mock userController to make logIn work
    ParseUserController userController = mock(ParseUserController.class);
    ParseUser.State newUserState = new ParseUser.State.Builder().put("newKey", "newValue")
            .sessionToken("newSessionToken").build();
    when(userController.logInAsync(anyString(), anyString())).thenReturn(Task.forResult(newUserState));
    ParseCorePlugins.getInstance().registerUserController(userController);

    final Semaphore done = new Semaphore(0);
    ParseUser.logInInBackground("userName", "password", new LogInCallback() {
        @Override
        public void done(ParseUser user, ParseException e) {
            done.release();
            assertNull(e);
            // Make sure user's data is correct
            assertEquals("newSessionToken", user.getSessionToken());
            assertEquals("newValue", user.get("newKey"));
        }
    });
    assertTrue(done.tryAcquire(5, TimeUnit.SECONDS));

    // Make sure the user is logged in
    verify(userController, times(1)).logInAsync("userName", "password");
    // Make sure we set currentUser
    verify(currentUserController, times(1)).setAsync(any(ParseUser.class));
}
From source file:com.parse.ParseOkHttpClientTest.java
@Test
public void testParseOkHttpClientExecuteWithExternalInterceptorAndGZIPResponse() throws Exception {
    // Make mock response
    Buffer buffer = new Buffer();
    final ByteArrayOutputStream byteOut = new ByteArrayOutputStream();
    GZIPOutputStream gzipOut = new GZIPOutputStream(byteOut);
    gzipOut.write("content".getBytes());
    gzipOut.close();
    buffer.write(byteOut.toByteArray());
    MockResponse mockResponse = new MockResponse().setStatus("HTTP/1.1 " + 201 + " " + "OK").setBody(buffer)
            .setHeader("Content-Encoding", "gzip");

    // Start mock server
    server.enqueue(mockResponse);
    server.start();

    ParseHttpClient client = new ParseOkHttpClient(10000, null);

    final Semaphore done = new Semaphore(0);
    // Add a plain interceptor to disable decompression of the response stream
    client.addExternalInterceptor(new ParseNetworkInterceptor() {
        @Override
        public ParseHttpResponse intercept(Chain chain) throws IOException {
            done.release();
            ParseHttpResponse parseResponse = chain.proceed(chain.getRequest());
            // Make sure the response we get from the interceptor is the raw gzip stream
            byte[] content = ParseIOUtils.toByteArray(parseResponse.getContent());
            assertArrayEquals(byteOut.toByteArray(), content);
            // We need to set a new stream since we have read it
            return new ParseHttpResponse.Builder()
                    .setContent(new ByteArrayInputStream(byteOut.toByteArray()))
                    .build();
        }
    });

    // We do not need to add the Accept-Encoding header manually; the httpClient library should do that.
    String requestUrl = server.getUrl("/").toString();
    ParseHttpRequest parseRequest = new ParseHttpRequest.Builder().setUrl(requestUrl)
            .setMethod(ParseHttpRequest.Method.GET).build();

    // Execute request
    ParseHttpResponse parseResponse = client.execute(parseRequest);
    // Make sure the response we get is ungzipped by the OkHttp library
    byte[] content = ParseIOUtils.toByteArray(parseResponse.getContent());
    assertArrayEquals("content".getBytes(), content);
    // Make sure the interceptor is called
    assertTrue(done.tryAcquire(10, TimeUnit.SECONDS));

    server.shutdown();
}
From source file:com.arpnetworking.metrics.impl.ApacheHttpSinkTest.java
@Test
public void testStop() throws InterruptedException {
    _wireMockRule
            .stubFor(WireMock.post(WireMock.urlEqualTo(PATH)).willReturn(WireMock.aResponse().withStatus(200)));

    final Semaphore semaphore = new Semaphore(0);
    @SuppressWarnings("unchecked")
    final ApacheHttpSink sink = (ApacheHttpSink) new ApacheHttpSink.Builder()
            .setUri(URI.create("http://localhost:" + _wireMockRule.port() + PATH))
            .setEventHandler(new CompletionHandler(semaphore)).build();

    final Map<String, String> annotations = new LinkedHashMap<>();
    annotations.put("foo", "bar");
    annotations.put("_start",
            Instant.now().minusMillis(812).atZone(ZoneId.of("UTC")).format(DateTimeFormatter.ISO_INSTANT));
    annotations.put("_end", Instant.now().atZone(ZoneId.of("UTC")).format(DateTimeFormatter.ISO_INSTANT));
    annotations.put("_host", "some.host.com");
    annotations.put("_service", "myservice");
    annotations.put("_cluster", "mycluster");
    annotations.put("_id", UUID.randomUUID().toString());

    final TsdEvent event = new TsdEvent(annotations,
            createQuantityMap("timer", TsdQuantity.newInstance(123, Units.NANOSECOND)),
            createQuantityMap("counter", TsdQuantity.newInstance(8, null)),
            createQuantityMap("gauge", TsdQuantity.newInstance(10, Units.BYTE)));

    sink.stop();
    Thread.sleep(1000);
    sink.record(event);
    Assert.assertFalse(semaphore.tryAcquire(1, TimeUnit.SECONDS));

    // Request matcher
    final RequestPatternBuilder requestPattern = WireMock.postRequestedFor(WireMock.urlEqualTo(PATH))
            .withHeader("Content-Type", WireMock.equalTo("application/octet-stream"));

    // Assert that no data was sent after the sink was stopped
    _wireMockRule.verify(0, requestPattern);
    Assert.assertTrue(_wireMockRule.findUnmatchedRequests().getRequests().isEmpty());
}
From source file:voldemort.store.routed.RoutedStore.java
public boolean delete(final ByteArray key, final Version version) throws VoldemortException {
    StoreUtils.assertValidKey(key);
    final List<Node> nodes = availableNodes(routingStrategy.routeRequest(key.get()));

    // quickly fail if there aren't enough live nodes to meet the
    // requirements
    final int numNodes = nodes.size();
    if (numNodes < this.storeDef.getRequiredWrites())
        throw new InsufficientOperationalNodesException("Only " + numNodes + " nodes in preference list, but "
                + this.storeDef.getRequiredWrites() + " writes required.");

    // A count of the number of successful operations
    final AtomicInteger successes = new AtomicInteger(0);
    final AtomicBoolean deletedSomething = new AtomicBoolean(false);
    // A list of thrown exceptions, indicating the number of failures
    final List<Exception> failures = Collections.synchronizedList(new LinkedList<Exception>());

    // A semaphore indicating the number of completed operations
    // Once initialized all permits are acquired, after that
    // permits are released when an operation is completed.
    // semaphore.acquire(n) waits for n operations to complete
    final Semaphore semaphore = new Semaphore(0, false);
    // Add the operations to the pool
    for (final Node node : nodes) {
        this.executor.execute(new Runnable() {

            public void run() {
                long startNs = System.nanoTime();
                try {
                    boolean deleted = innerStores.get(node.getId()).delete(key, version);
                    successes.incrementAndGet();
                    deletedSomething.compareAndSet(false, deleted);
                    recordSuccess(node, startNs);
                } catch (UnreachableStoreException e) {
                    failures.add(e);
                    recordException(node, startNs, e);
                } catch (VoldemortApplicationException e) {
                    throw e;
                } catch (Exception e) {
                    failures.add(e);
                    logger.warn("Error in DELETE on node " + node.getId() + "(" + node.getHost() + ")", e);
                } finally {
                    // signal that the operation is complete
                    semaphore.release();
                }
            }
        });
    }

    int attempts = Math.min(storeDef.getPreferredWrites(), numNodes);
    if (this.storeDef.getPreferredWrites() <= 0) {
        return true;
    } else {
        for (int i = 0; i < numNodes; i++) {
            try {
                boolean acquired = semaphore.tryAcquire(timeoutMs, TimeUnit.MILLISECONDS);
                if (!acquired)
                    logger.warn("Delete operation timed out waiting for operation " + i
                            + " to complete after waiting " + timeoutMs + " ms.");
                // okay, at least the required number of operations have
                // completed, were they successful?
                if (successes.get() >= attempts)
                    return deletedSomething.get();
            } catch (InterruptedException e) {
                throw new InsufficientOperationalNodesException("Delete operation interrupted!", e);
            }
        }
    }

    // If we get to here, that means we couldn't hit the preferred number
    // of writes, throw an exception if you can't even hit the required
    // number
    if (successes.get() < storeDef.getRequiredWrites())
        throw new InsufficientOperationalNodesException(this.storeDef.getRequiredWrites()
                + " deletes required, but " + successes.get() + " succeeded.", failures);
    else
        return deletedSomething.get();
}
From source file:com.baifendian.swordfish.execserver.runner.flow.FlowRunner.java
/**
 * Runs the flow DAG. Nodes that already finished in a previous run are skipped, the start
 * vertices are submitted first, and each time a node runner completes the shared semaphore is
 * released so that eligible successor nodes can be scheduled. Failed nodes are retried up to
 * maxTryTimes; the method returns SUCCESS, FAILED or KILL for the whole flow.
 * (The comments below summarize the code; the original source comments were not in English.)
 */
private FlowStatus runFlow(Graph<String, FlowNode, FlowNodeRelation> dagGraph) {
    // released once for every node runner that finishes
    Semaphore semaphore = new Semaphore(0);

    // remove nodes that have already finished from the graph
    try {
        for (String nodeName : dagGraph.topologicalSort()) {
            ExecutionNode executionNode = flowDao.queryExecutionNode(executionFlow.getId(), nodeName);
            if (executionNode != null && executionNode.getStatus().typeIsFinished()) {
                dagGraph.removeVertex(nodeName);
            }
        }
    } catch (Exception e) {
        logger.error("Get topological of graph failed.", e);
        return FlowStatus.FAILED;
    }

    Collection<String> startVertex = dagGraph.getStartVertex();

    // submit the start vertices that have not been submitted yet
    for (String nodeName : startVertex) {
        if (!executionNodeMap.containsKey(nodeName)) {
            ExecutionNode executionNode = insertExecutionNode(executionFlow, nodeName);
            executionNodeMap.put(nodeName, executionNode);
            submitNodeRunner(dagGraph.getVertex(nodeName), executionNode, semaphore);
        }
    }

    FlowStatus status = FlowStatus.SUCCESS;

    // loop until all submitted node runners are done
    while (!activeNodeRunners.isEmpty()) {
        boolean acquire = false;
        try {
            // wait for some node runner to finish, up to the calculated node timeout
            acquire = semaphore.tryAcquire(calcNodeTimeout(), TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            logger.error(e.getMessage(), e);
        } catch (ExecTimeoutException e) {
            logger.error(e.getMessage(), e);
        }

        // no node finished within the timeout: clean up and fail the flow
        if (!acquire) {
            clean(true);
            return FlowStatus.FAILED;
        }

        // find the node runner that has finished
        boolean done = false;
        while (!done) {
            try {
                Thread.sleep(50);
            } catch (InterruptedException e) {
                logger.error(e.getMessage(), e);
            }

            for (Map.Entry<NodeRunner, Future<Boolean>> entry : activeNodeRunners.entrySet()) {
                NodeRunner nodeRunner = entry.getKey();
                Future<Boolean> future = entry.getValue();

                if (future.isDone()) {
                    done = true;
                    activeNodeRunners.remove(nodeRunner);

                    Boolean value = false;
                    Date now = new Date();

                    try {
                        value = future.get();
                    } catch (CancellationException e) {
                        logger.error("task has been cancelled");
                        clean(true);
                        return FlowStatus.KILL;
                    } catch (InterruptedException e) {
                        logger.error(e.getMessage(), e);
                    } catch (ExecutionException e) {
                        logger.error(e.getMessage(), e);
                    }

                    if (!value) {
                        // the node failed: retry it if attempts remain, otherwise mark it failed
                        ExecutionNode executionNode = executionNodeMap.get(nodeRunner.getNodename());

                        if (executionNode.getAttempt() < maxTryTimes) {
                            executionNode.incAttempt();
                            flowDao.updateExecutionNode(executionNode);
                            submitNodeRunner(dagGraph.getVertex(nodeRunner.getNodename()), executionNode,
                                    semaphore);
                        } else {
                            status = FlowStatus.FAILED;
                            executionNode.setEndTime(now);
                            executionNode.setStatus(status);
                            flowDao.updateExecutionNode(executionNode);

                            // stop the whole flow immediately if the failure policy says so
                            if (failurePolicyType == FailurePolicyType.END) {
                                clean(true);
                                return status;
                            }
                        }
                    } else {
                        // the node succeeded: persist its status and submit successors whose
                        // predecessors have all succeeded
                        ExecutionNode executionNode = executionNodeMap.get(nodeRunner.getNodename());
                        executionNode.setEndTime(now);
                        executionNode.setStatus(FlowStatus.SUCCESS);
                        flowDao.updateExecutionNode(executionNode);

                        for (String nodeName : dagGraph.getPostNode(nodeRunner.getNodename())) {
                            if (!executionNodeMap.containsKey(nodeName)
                                    && isPreNodesAllSuccess(dagGraph.getPreNode(nodeName))) {
                                ExecutionNode newExecutionNode = insertExecutionNode(executionFlow, nodeName);
                                executionNodeMap.put(nodeName, newExecutionNode);
                                submitNodeRunner(dagGraph.getVertex(nodeName), newExecutionNode, semaphore);
                            }
                        }
                    }

                    break;
                }
            }
        }
    }

    return status;
}
From source file:voldemort.store.routed.ThreadPoolRoutedStore.java
@Override
public boolean delete(final ByteArray key, final Version version) throws VoldemortException {
    StoreUtils.assertValidKey(key);
    final List<Node> nodes = availableNodes(routingStrategy.routeRequest(key.get()));

    // quickly fail if there aren't enough live nodes to meet the
    // requirements
    final int numNodes = nodes.size();
    if (numNodes < this.storeDef.getRequiredWrites())
        throw new InsufficientOperationalNodesException("Only " + numNodes + " nodes in preference list, but "
                + this.storeDef.getRequiredWrites() + " writes required.");

    // A count of the number of successful operations
    final AtomicInteger successes = new AtomicInteger(0);
    final AtomicBoolean deletedSomething = new AtomicBoolean(false);
    // A list of thrown exceptions, indicating the number of failures
    final List<Exception> failures = Collections.synchronizedList(new LinkedList<Exception>());

    // A semaphore indicating the number of completed operations
    // Once initialized all permits are acquired, after that
    // permits are released when an operation is completed.
    // semaphore.acquire(n) waits for n operations to complete
    final Semaphore semaphore = new Semaphore(0, false);
    // Add the operations to the pool
    for (final Node node : nodes) {
        this.executor.execute(new Runnable() {

            @Override
            public void run() {
                long startNs = System.nanoTime();
                try {
                    boolean deleted = innerStores.get(node.getId()).delete(key, version);
                    successes.incrementAndGet();
                    deletedSomething.compareAndSet(false, deleted);
                    recordSuccess(node, startNs);
                } catch (UnreachableStoreException e) {
                    failures.add(e);
                    recordException(node, startNs, e);
                } catch (VoldemortApplicationException e) {
                    throw e;
                } catch (Exception e) {
                    failures.add(e);
                    logger.warn("Error in DELETE on node " + node.getId() + "(" + node.getHost() + ")", e);
                } finally {
                    // signal that the operation is complete
                    semaphore.release();
                }
            }
        });
    }

    int attempts = Math.min(storeDef.getPreferredWrites(), numNodes);
    if (this.storeDef.getPreferredWrites() <= 0) {
        return true;
    } else {
        for (int i = 0; i < numNodes; i++) {
            try {
                long timeoutMs = timeoutConfig.getOperationTimeout(VoldemortOpCode.DELETE_OP_CODE);
                boolean acquired = semaphore.tryAcquire(timeoutMs, TimeUnit.MILLISECONDS);
                if (!acquired)
                    logger.warn("Delete operation timed out waiting for operation " + i
                            + " to complete after waiting " + timeoutMs + " ms.");
                // okay, at least the required number of operations have
                // completed, were they successful?
                if (successes.get() >= attempts)
                    return deletedSomething.get();
            } catch (InterruptedException e) {
                throw new InsufficientOperationalNodesException("Delete operation interrupted!", e);
            }
        }
    }

    // If we get to here, that means we couldn't hit the preferred number
    // of writes, throw an exception if you can't even hit the required
    // number
    if (successes.get() < storeDef.getRequiredWrites())
        throw new InsufficientOperationalNodesException(this.storeDef.getRequiredWrites()
                + " deletes required, but " + successes.get() + " succeeded.", failures);
    else
        return deletedSomething.get();
}
From source file:android.webkit.cts.WebViewTest.java
private void doSaveWebArchive(String baseName, boolean autoName, final String expectName) throws Throwable {
    final Semaphore saving = new Semaphore(0);
    ValueCallback<String> callback = new ValueCallback<String>() {
        @Override
        public void onReceiveValue(String savedName) {
            assertEquals(expectName, savedName);
            saving.release();
        }
    };

    mOnUiThread.saveWebArchive(baseName, autoName, callback);
    assertTrue(saving.tryAcquire(TEST_TIMEOUT, TimeUnit.MILLISECONDS));
}