Example usage for java.util.concurrent Semaphore acquire

List of usage examples for java.util.concurrent Semaphore acquire

Introduction

On this page you can find example usages of java.util.concurrent Semaphore acquire.

Prototype

public void acquire() throws InterruptedException 

Document

Acquires a permit from this semaphore, blocking until one is available, or the thread is interrupted.
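
Below is a minimal, illustrative sketch (not taken from any of the projects listed here) of the blocking acquire()/release() contract described above; the class and method names are made up for the example.

import java.util.concurrent.Semaphore;

public class AcquireSketch {
    // Hypothetical guard: at most three callers may use the resource at a time.
    private static final Semaphore PERMITS = new Semaphore(3);

    static void doGuardedWork() throws InterruptedException {
        PERMITS.acquire();      // blocks until a permit is free, or throws if the thread is interrupted
        try {
            // ... use the limited resource here ...
        } finally {
            PERMITS.release();  // always hand the permit back
        }
    }
}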

Usage

From source file:com.impetus.ankush2.ganglia.GangliaDeployer.java

/**
 * Performs asynchronous validation operations on the nodes.
 *
 * @param nodeList
 *            {@link Collection}
 * @return <code>true</code>, if successful
 */
private boolean validate(Collection<String> nodeList) throws AnkushException {
    try {
        // Create semaphore to join threads
        final Semaphore semaphore = new Semaphore(nodeList.size());
        for (final String host : nodeList) {
            final NodeConfig nodeConf = clusterConf.getNodes().get(host);
            semaphore.acquire();
            AppStoreWrapper.getExecutor().execute(new Runnable() {
                @Override
                public void run() {
                    try {
                        nodeConf.setStatus(new GangliaValidator(clusterConf, nodeConf).validate());
                    } catch (AnkushException e) {
                        addClusterError(e.getMessage(), host, e);
                    } catch (Exception e) {
                        addClusterError("There is some exception while validating " + host + " for "
                                + getComponentName() + " deployment. " + GangliaConstants.EXCEPTION_STRING,
                                host, e);
                    } finally {
                        semaphore.release();
                    }
                }
            });
        }
        semaphore.acquire(nodeList.size());

    } catch (Exception e) {
        throw new AnkushException("There is some exception while validating nodes for " + getComponentName()
                + " deployment." + GangliaConstants.EXCEPTION_STRING, e);
    }
    return AnkushUtils.getStatus(clusterConf, nodeList);
}
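
The example above uses the semaphore as a join barrier: one permit is taken per submitted task, each task releases its permit in a finally block, and acquire(nodeList.size()) then blocks until every task has finished. A standalone sketch of the same idea, assuming nothing from the Ankush code base (the host names and executor are illustrative):

import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;

public class JoinBarrierSketch {
    public static void main(String[] args) throws InterruptedException {
        List<String> hosts = List.of("host-1", "host-2", "host-3");
        ExecutorService executor = Executors.newFixedThreadPool(2);
        Semaphore done = new Semaphore(hosts.size());   // one permit per task

        for (String host : hosts) {
            done.acquire();                             // take a permit before submitting
            executor.execute(() -> {
                try {
                    System.out.println("validating " + host);
                } finally {
                    done.release();                     // return the permit when the task finishes
                }
            });
        }

        done.acquire(hosts.size());                     // blocks until every task has released
        executor.shutdown();
    }
}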

From source file:com.amazonaws.services.sqs.buffered.SendQueueBuffer.java

/**
 * Submits an outbound request for delivery to the queue associated with
 * this buffer.
 * <p>
 *
 * @param operationLock
 *            the lock synchronizing calls for the call type (
 *            {@code sendMessage}, {@code deleteMessage},
 *            {@code changeMessageVisibility} )
 * @param openOutboundBatchTask
 *            the open batch task for this call type
 * @param request
 *            the request to submit
 * @param inflightOperationBatches
 *            the permits controlling the batches for this type of request
 * @return never null
 * @throws AmazonClientException
 *             (see the various outbound calls for details)
 */
@SuppressWarnings("unchecked")
<OBT extends OutboundBatchTask<R, Result>, R extends AmazonWebServiceRequest, Result> QueueBufferFuture<R, Result> submitOutboundRequest(
        Object operationLock, OBT[] openOutboundBatchTask, R request, final Semaphore inflightOperationBatches,
        QueueBufferCallback<R, Result> callback) {
    /*
     * Callers add requests to a single batch task (openOutboundBatchTask)
     * until it is full or maxBatchOpenMs elapses. The total number of batch
     * tasks in flight is controlled by the inflightOperationBatches semaphore,
     * capped at maxInflightOutboundBatches.
     */
    QueueBufferFuture<R, Result> theFuture = null;
    try {
        synchronized (operationLock) {
            if (openOutboundBatchTask[0] == null
                    || ((theFuture = openOutboundBatchTask[0].addRequest(request, callback))) == null) {
                OBT obt = (OBT) newOutboundBatchTask(request);
                inflightOperationBatches.acquire();
                openOutboundBatchTask[0] = obt;
                // Register a listener for the event signaling that the
                // batch task has completed (successfully or not).
                openOutboundBatchTask[0].onCompleted = new Listener<OutboundBatchTask<R, Result>>() {
                    public void invoke(OutboundBatchTask<R, Result> task) {
                        inflightOperationBatches.release();
                    }
                };

                if (log.isTraceEnabled()) {
                    log.trace("Queue " + qUrl + " created new batch for " + request.getClass().toString() + " "
                            + inflightOperationBatches.availablePermits() + " free slots remain");
                }

                theFuture = openOutboundBatchTask[0].addRequest(request, callback);
                executor.execute(openOutboundBatchTask[0]);
                if (null == theFuture) {
                    //this can happen only if the request itself is flawed,
                    //so that it can't be added to any batch, even a brand
                    //new one
                    throw new AmazonClientException("Failed to schedule request " + request + " for execution");
                }
            }
        }

    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        AmazonClientException toThrow = new AmazonClientException("Interrupted while waiting for lock.");
        toThrow.initCause(e);
        throw toThrow;
    }

    return theFuture;
}
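
Here the semaphore caps the number of batch tasks in flight: a permit is taken before a new batch is opened and returned by the completion listener. A simplified sketch of that pattern, with a made-up limit and task type rather than the SQS buffer's actual configuration:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;

public class InflightLimiterSketch {
    private static final int MAX_INFLIGHT = 5;           // illustrative cap
    private final Semaphore inflight = new Semaphore(MAX_INFLIGHT);
    private final ExecutorService executor = Executors.newCachedThreadPool();

    void submit(Runnable batch) throws InterruptedException {
        inflight.acquire();                              // blocks once MAX_INFLIGHT batches are running
        executor.execute(() -> {
            try {
                batch.run();
            } finally {
                inflight.release();                      // the "completion listener": frees the slot
            }
        });
    }
}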

From source file:org.jboss.pnc.jenkinsbuilddriver.test.JenkinsDriverRemoteTest.java

@Test
//@Ignore("To be fixed by NCL-554")
public void startJenkinsJobTestCase() throws Exception {
    BuildConfigurationAudited pbc = getBuildConfiguration();

    RunningEnvironment runningEnvironment = getRunningEnvironment();

    final Semaphore mutex = new Semaphore(1);
    ObjectWrapper<Boolean> completed = new ObjectWrapper<>(false);
    ObjectWrapper<BuildDriverResult> resultWrapper = new ObjectWrapper<>();
    ObjectWrapper<Long> buildStarted = new ObjectWrapper<>();
    ObjectWrapper<Long> buildTook = new ObjectWrapper<>();

    class BuildTask {
        CompletedBuild buildJobDetails;
    }

    final BuildTask buildTask = new BuildTask();

    Consumer<CompletedBuild> onComplete = (completedBuild) -> {
        buildTask.buildJobDetails = completedBuild;
        completed.set(true);
        buildTook.set(System.currentTimeMillis() - buildStarted.get());
        log.info("Received build completed in " + buildTook.get() + "ms.");

        try {
            resultWrapper.set(completedBuild.getBuildResult());
        } catch (BuildDriverException e) {
            throw new AssertionError("Cannot get build result.", e);
        }

        mutex.release();
    };

    Consumer<Throwable> onError = (e) -> {
        throw new AssertionError(e);
    };

    mutex.acquire();
    RunningBuild runningBuild = jenkinsBuildDriver.startProjectBuild(mock(BuildExecution.class), pbc,
            runningEnvironment);
    buildStarted.set(System.currentTimeMillis());
    runningBuild.monitor(onComplete, onError);
    mutex.tryAcquire(60, TimeUnit.SECONDS); // wait for callback to release

    Assert.assertTrue("There was no complete callback.", completed.get());
    Assert.assertNotNull(buildTask.buildJobDetails);

    long minBuildTime = 5000;
    Assert.assertTrue(
            "Received build completed in " + buildTook.get() + " while expected >" + minBuildTime + ".",
            buildTook.get() >= minBuildTime);

    BuildDriverResult buildDriverResult = resultWrapper.get();

    Assert.assertEquals(BuildDriverStatus.SUCCESS, buildDriverResult.getBuildDriverStatus());
    Assert.assertTrue("Incomplete build log.",
            buildDriverResult.getBuildLog().contains("Building in workspace"));
    Assert.assertTrue("Incomplete build log.", buildDriverResult.getBuildLog().contains("Finished: SUCCESS"));

    Assert.assertTrue("There was no complete callback.", completed.get());
}
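
The test above drains a one-permit semaphore before starting the build, so the later tryAcquire(60, TimeUnit.SECONDS) blocks until the completion callback calls release() (or the timeout expires). A condensed sketch of that handshake, with a fake asynchronous task standing in for the Jenkins build:

import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;

public class CallbackWaitSketch {
    public static void main(String[] args) throws InterruptedException {
        Semaphore mutex = new Semaphore(1);
        mutex.acquire();                                  // take the only permit before the async call

        Runnable onComplete = () -> {
            System.out.println("build completed");
            mutex.release();                              // the callback hands the permit back
        };

        new Thread(onComplete).start();                   // stand-in for the asynchronous build

        // Wait up to 60 seconds for the callback; returns false on timeout instead of hanging forever.
        boolean signalled = mutex.tryAcquire(60, TimeUnit.SECONDS);
        System.out.println("callback received: " + signalled);
    }
}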

From source file:com.amazonaws.services.kinesis.clientlibrary.lib.worker.WorkerTest.java

/**
 * This test is testing the {@link Worker}'s shutdown behavior and by extension the behavior of
 * {@link ThreadPoolExecutor#shutdownNow()}. It depends on the thread pool sending an interrupt to the pool threads.
 * This behavior makes the test a bit racy, since we need to ensure a specific order of events.
 *
 * @throws Exception
 */
@Test
public final void testWorkerForcefulShutdown() throws Exception {
    final List<Shard> shardList = createShardListWithOneShard();
    final boolean callProcessRecordsForEmptyRecordList = true;
    final long failoverTimeMillis = 50L;
    final int numberOfRecordsPerShard = 10;

    final List<KinesisClientLease> initialLeases = new ArrayList<KinesisClientLease>();
    for (Shard shard : shardList) {
        KinesisClientLease lease = ShardSyncer.newKCLLease(shard);
        lease.setCheckpoint(ExtendedSequenceNumber.TRIM_HORIZON);
        initialLeases.add(lease);
    }

    final File file = KinesisLocalFileDataCreator.generateTempDataFile(shardList, numberOfRecordsPerShard,
            "normalShutdownUnitTest");
    final IKinesisProxy fileBasedProxy = new KinesisLocalFileProxy(file.getAbsolutePath());

    // Get executor service that will be owned by the worker, so we can get interrupts.
    ExecutorService executorService = getWorkerThreadPoolExecutor();

    // Make test case as efficient as possible.
    final CountDownLatch processRecordsLatch = new CountDownLatch(1);
    final AtomicBoolean recordProcessorInterrupted = new AtomicBoolean(false);
    when(v2RecordProcessorFactory.createProcessor()).thenReturn(v2RecordProcessor);
    final Semaphore actionBlocker = new Semaphore(1);
    final Semaphore shutdownBlocker = new Semaphore(1);

    actionBlocker.acquire();

    doAnswer(new Answer<Object>() {
        @Override
        public Object answer(InvocationOnMock invocation) throws Throwable {
            // Signal that record processor has started processing records.
            processRecordsLatch.countDown();

            // Block for some time now to test forceful shutdown. Also, check if record processor
            // was interrupted or not.
            final long startTimeMillis = System.currentTimeMillis();
            long elapsedTimeMillis = 0;

            LOG.info("Entering sleep @ " + startTimeMillis + " with elapsedMills: " + elapsedTimeMillis);
            shutdownBlocker.acquire();
            try {
                actionBlocker.acquire();
            } catch (InterruptedException e) {
                LOG.info("Sleep interrupted @ " + System.currentTimeMillis() + " elapsedMillis: "
                        + (System.currentTimeMillis() - startTimeMillis));
                recordProcessorInterrupted.getAndSet(true);
            }
            shutdownBlocker.release();
            elapsedTimeMillis = System.currentTimeMillis() - startTimeMillis;
            LOG.info(
                    "Sleep completed @ " + System.currentTimeMillis() + " elapsedMillis: " + elapsedTimeMillis);

            return null;
        }
    }).when(v2RecordProcessor).processRecords(any(ProcessRecordsInput.class));

    WorkerThread workerThread = runWorker(shardList, initialLeases, callProcessRecordsForEmptyRecordList,
            failoverTimeMillis, numberOfRecordsPerShard, fileBasedProxy, v2RecordProcessorFactory,
            executorService, nullMetricsFactory);

    // Only sleep for time that is required.
    processRecordsLatch.await();

    // Make sure record processor is initialized and processing records.
    verify(v2RecordProcessorFactory, times(1)).createProcessor();
    verify(v2RecordProcessor, times(1)).initialize(any(InitializationInput.class));
    verify(v2RecordProcessor, atLeast(1)).processRecords(any(ProcessRecordsInput.class));
    verify(v2RecordProcessor, times(0)).shutdown(any(ShutdownInput.class));

    workerThread.getWorker().shutdown();
    workerThread.join();

    Assert.assertTrue(workerThread.getState() == State.TERMINATED);
    // Shutdown should not be called in this case because record processor is blocked.
    verify(v2RecordProcessor, times(0)).shutdown(any(ShutdownInput.class));

    //
    // Release the worker thread
    //
    actionBlocker.release();
    //
    // Give the worker thread time to execute its interrupt handler.
    //
    shutdownBlocker.tryAcquire(100, TimeUnit.MILLISECONDS);
    //
    // Now we can see if it was actually interrupted. It's possible it wasn't and this will fail.
    //
    assertThat(recordProcessorInterrupted.get(), equalTo(true));
}
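
Two semaphores drive the forceful-shutdown scenario above: actionBlocker is drained up front so the record processor parks inside acquire(), and shutdownBlocker lets the test wait for the interrupt handling to finish. A self-contained sketch of the same choreography, using plain threads instead of the KCL worker (the sleep makes it deliberately crude and racy, as the original javadoc warns):

import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

public class InterruptProbeSketch {
    public static void main(String[] args) throws InterruptedException {
        Semaphore actionBlocker = new Semaphore(1);
        Semaphore shutdownBlocker = new Semaphore(1);
        AtomicBoolean interrupted = new AtomicBoolean(false);

        actionBlocker.acquire();                          // drain the permit so the worker will block

        Thread worker = new Thread(() -> {
            try {
                shutdownBlocker.acquire();                // held while "processing records"
                try {
                    actionBlocker.acquire();              // parks here until released or interrupted
                } catch (InterruptedException e) {
                    interrupted.set(true);                // the shutdown interrupt arrived
                } finally {
                    shutdownBlocker.release();
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        worker.start();

        Thread.sleep(100);                                // crude: give the worker time to reach acquire()
        worker.interrupt();                               // simulate ExecutorService.shutdownNow()

        actionBlocker.release();                          // unblock the worker if the interrupt was missed
        shutdownBlocker.tryAcquire(1, TimeUnit.SECONDS);  // wait for the worker's interrupt handling
        System.out.println("worker interrupted: " + interrupted.get());
    }
}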

From source file:edu.illinois.enforcemop.examples.jbosscache.PessimisticSyncReplTxTest.java

/**
 * Have both cache1 and cache2 do add and commit. cache1's commit should time
 * out since it can't obtain the lock when trying to replicate to cache2. On the
 * other hand, cache2's commit will succeed since cache1 has been rolled back
 * and its lock released.
 */
public void testPutTx1() throws Exception {
    final CacheSPI<Object, Object> c1 = this.cache1;

    final Semaphore threadOneFirstPart = new Semaphore(0);
    final Semaphore threadTwoFirstPart = new Semaphore(0);
    final Semaphore threadOneSecondPart = new Semaphore(0);

    Thread t1 = new Thread() {
        public void run() {
            TransactionManager tm;

            try {
                tm = beginTransaction();
                c1.put("/a/b/c", "age", 38);
                c1.put("/a/b/c", "age", 39);
                threadOneFirstPart.release();

                threadTwoFirstPart.acquire();
                try {
                    tm.commit();
                } catch (RollbackException ex) {
                } finally {
                    threadOneSecondPart.release();
                }
            } catch (Throwable ex) {
                ex.printStackTrace();
                t1_ex = ex;
            }
        }
    };

    Thread t2 = new Thread() {
        public void run() {
            TransactionManager tm;

            try {
                threadOneFirstPart.acquire();
                tm = beginTransaction();
                assertNull(cache2.get("/a/b/c", "age"));// must be null as not yet
                                                        // committed
                cache2.put("/a/b/c", "age", 40);

                threadTwoFirstPart.release();

                threadOneSecondPart.acquire();
                assertEquals(40, cache2.get("/a/b/c", "age"));// must not be null
                tm.commit();

                tm = beginTransaction();
                assertEquals("After cache2 commit", 40, cache2.get("/a/b/c", "age"));
                tm.commit();
            } catch (Throwable ex) {
                ex.printStackTrace();
                t2_ex = ex;
            } finally {
                lock.release();
            }
        }
    };

    // Let the game start
    t1.start();
    t2.start();

    t1.join();
    t2.join();

    if (t1_ex != null) {
        fail("Thread1 failed: " + t1_ex);
    }
    if (t2_ex != null) {
        fail("Thread2 failed: " + t2_ex);
    }
}
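
The zero-permit semaphores above act as one-shot handoff signals: a thread that calls acquire() on an empty semaphore simply waits until the other thread calls release(), which is how the test forces a specific interleaving of the two transactions. A bare-bones sketch of that sequencing, independent of JBoss Cache:

import java.util.concurrent.Semaphore;

public class HandoffSketch {
    public static void main(String[] args) throws InterruptedException {
        Semaphore stepOneDone = new Semaphore(0);         // no permits: acquire() waits for a release()
        Semaphore stepTwoDone = new Semaphore(0);

        Thread t1 = new Thread(() -> {
            try {
                System.out.println("t1: write values inside tx1");
                stepOneDone.release();                    // let t2 run its first part
                stepTwoDone.acquire();                    // wait for t2 before committing
                System.out.println("t1: commit tx1");
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });

        Thread t2 = new Thread(() -> {
            try {
                stepOneDone.acquire();                    // runs only after t1's first part
                System.out.println("t2: write conflicting value inside tx2");
                stepTwoDone.release();                    // hand control back to t1
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });

        t1.start();
        t2.start();
        t1.join();
        t2.join();
    }
}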

From source file:com.thoughtworks.go.server.service.BackupServiceIntegrationTest.java

@Test
public void shouldExecutePostBackupScriptAndReturnResultOnSuccess() throws InterruptedException {
    final Semaphore waitForBackupToComplete = new Semaphore(1);
    GoConfigService configService = mock(GoConfigService.class);
    ServerConfig serverConfig = new ServerConfig();
    serverConfig.setBackupConfig(new BackupConfig(null, "jcmd", false, false));
    when(configService.serverConfig()).thenReturn(serverConfig);
    GoMailSender goMailSender = mock(GoMailSender.class);
    when(configService.getMailSender()).thenReturn(goMailSender);
    when(configService.adminEmail()).thenReturn("mail@admin.com");
    when(configService.isUserAdmin(admin)).thenReturn(true);
    TimeProvider timeProvider = mock(TimeProvider.class);
    DateTime now = new DateTime();
    when(timeProvider.currentDateTime()).thenReturn(now);

    final MessageCollectingBackupUpdateListener backupUpdateListener = new MessageCollectingBackupUpdateListener(
            waitForBackupToComplete);

    waitForBackupToComplete.acquire();
    backupService = new BackupService(artifactsDirHolder, configService, timeProvider, backupInfoRepository,
            systemEnvironment, configRepository, databaseStrategy, backupQueue);
    Thread backupThd = new Thread(() -> backupService.startBackup(admin, backupUpdateListener));

    backupThd.start();
    waitForBackupToComplete.acquire();
    assertThat(backupUpdateListener.getMessages()
            .contains(BackupProgressStatus.POST_BACKUP_SCRIPT_COMPLETE.getMessage()), is(true));
    backupThd.join();
}

From source file:com.arpnetworking.metrics.impl.ApacheHttpSinkTest.java

@Test
public void testPostBadHost() throws InterruptedException {
    final org.slf4j.Logger logger = Mockito.mock(org.slf4j.Logger.class);
    final Semaphore semaphore = new Semaphore(0);
    final Sink sink = new ApacheHttpSink(
            new ApacheHttpSink.Builder().setUri(URI.create("http://nohost.example.com" + PATH))
                    .setEventHandler(new CompletionHandler(semaphore)),
            logger);

    final TsdEvent event = new TsdEvent(ANNOTATIONS, TEST_EMPTY_SERIALIZATION_TIMERS,
            TEST_EMPTY_SERIALIZATION_COUNTERS, TEST_EMPTY_SERIALIZATION_GAUGES);

    sink.record(event);
    semaphore.acquire();

    // Request matcher
    final RequestPatternBuilder requestPattern = WireMock.postRequestedFor(WireMock.urlEqualTo(PATH))
            .withHeader("Content-Type", WireMock.equalTo("application/octet-stream"));

    // Assert that no data was sent
    _wireMockRule.verify(0, requestPattern);
    Assert.assertTrue(_wireMockRule.findUnmatchedRequests().getRequests().isEmpty());

    // Assert that an IOException was captured
    Mockito.verify(logger).error(
            Mockito.startsWith("Encountered failure when sending metrics to HTTP endpoint; uri="),
            Mockito.any(IOException.class));
}
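
This test (and the other ApacheHttpSinkTest cases below) starts the semaphore at zero permits and has the sink's completion handler release it, so semaphore.acquire() holds the test back until the asynchronous send attempt has finished. A stripped-down sketch of that signalling, with a hypothetical listener interface standing in for the real CompletionHandler:

import java.util.concurrent.Semaphore;

public class CompletionSignalSketch {
    // Hypothetical callback type standing in for the sink's event handler.
    interface AttemptListener { void attemptCompleted(boolean success); }

    public static void main(String[] args) throws InterruptedException {
        Semaphore completed = new Semaphore(0);           // starts empty: acquire() blocks until a release()
        AttemptListener listener = success -> completed.release();

        // Stand-in for the sink's asynchronous send attempt (which fails against a bad host).
        new Thread(() -> listener.attemptCompleted(false)).start();

        completed.acquire();                              // block the test until the attempt has finished
        System.out.println("safe to assert on the recorded requests now");
    }
}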

From source file:com.arpnetworking.metrics.impl.ApacheHttpSinkTest.java

@Test
public void testCompoundUnits() throws InterruptedException {
    _wireMockRule.stubFor(WireMock.requestMatching(new RequestValueMatcher(r -> {
        // Annotations
        Assert.assertEquals(0, r.getAnnotationsCount());

        // Dimensions
        Assert.assertEquals(0, r.getDimensionsCount());

        // Samples
        Assert.assertEquals(0, r.getTimersCount());
        Assert.assertEquals(0, r.getCountersCount());
        assertSample(r.getGaugesList(), "gauge", 10d, ClientV2.Unit.Type.Value.BIT,
                ClientV2.Unit.Scale.Value.UNIT, ClientV2.Unit.Type.Value.SECOND,
                ClientV2.Unit.Scale.Value.UNIT);
    })).willReturn(WireMock.aResponse().withStatus(200)));

    final Semaphore semaphore = new Semaphore(0);
    final Sink sink = new ApacheHttpSink.Builder()
            .setUri(URI.create("http://localhost:" + _wireMockRule.port() + PATH))
            .setEventHandler(new CompletionHandler(semaphore)).build();

    final TsdEvent event = new TsdEvent(Collections.emptyMap(), createQuantityMap(), createQuantityMap(),
            createQuantityMap("gauge", TsdQuantity.newInstance(10d, Units.BITS_PER_SECOND)));

    sink.record(event);
    semaphore.acquire();

    // Request matcher
    final RequestPatternBuilder requestPattern = WireMock.postRequestedFor(WireMock.urlEqualTo(PATH))
            .withHeader("Content-Type", WireMock.equalTo("application/octet-stream"));

    // Assert that data was sent
    _wireMockRule.verify(1, requestPattern);
    Assert.assertTrue(_wireMockRule.findUnmatchedRequests().getRequests().isEmpty());
}

From source file:com.arpnetworking.metrics.impl.ApacheHttpSinkTest.java

@Test
public void testHttpClientExecuteException() throws InterruptedException {
    final CloseableHttpClient httpClient = Mockito.mock(CloseableHttpClient.class, invocationOnMock -> {
        throw new NullPointerException("Throw by default");
    });

    final org.slf4j.Logger logger = Mockito.mock(org.slf4j.Logger.class);
    final Semaphore semaphore = new Semaphore(0);
    final Sink sink = new ApacheHttpSink(
            new ApacheHttpSink.Builder().setUri(URI.create("http://nohost.example.com" + PATH))
                    .setEventHandler(new CompletionHandler(semaphore)),
            () -> httpClient, logger);

    final TsdEvent event = new TsdEvent(ANNOTATIONS, TEST_EMPTY_SERIALIZATION_TIMERS,
            TEST_EMPTY_SERIALIZATION_COUNTERS, TEST_EMPTY_SERIALIZATION_GAUGES);

    sink.record(event);
    semaphore.acquire();

    // Request matcher
    final RequestPatternBuilder requestPattern = WireMock.postRequestedFor(WireMock.urlEqualTo(PATH))
            .withHeader("Content-Type", WireMock.equalTo("application/octet-stream"));

    // Assert that no data was sent
    _wireMockRule.verify(0, requestPattern);
    Assert.assertTrue(_wireMockRule.findUnmatchedRequests().getRequests().isEmpty());

    // Assert that the runtime exception was captured
    Mockito.verify(logger).error(
            Mockito.startsWith("Encountered failure when sending metrics to HTTP endpoint; uri="),
            Mockito.any(NullPointerException.class));
}

From source file:com.netflix.curator.framework.recipes.locks.TestInterProcessSemaphoreCluster.java

@Test
public void testKilledServerWithEnsembleProvider() throws Exception {
    final int CLIENT_QTY = 10;
    final Timing timing = new Timing();
    final String PATH = "/foo/bar/lock";

    ExecutorService executorService = Executors.newFixedThreadPool(CLIENT_QTY);
    ExecutorCompletionService<Void> completionService = new ExecutorCompletionService<Void>(executorService);
    TestingCluster cluster = new TestingCluster(3);
    try {
        cluster.start();

        final AtomicReference<String> connectionString = new AtomicReference<String>(
                cluster.getConnectString());
        final EnsembleProvider provider = new EnsembleProvider() {
            @Override
            public void start() throws Exception {
            }

            @Override
            public String getConnectionString() {
                return connectionString.get();
            }

            @Override
            public void close() throws IOException {
            }
        };

        final Semaphore acquiredSemaphore = new Semaphore(0);
        final AtomicInteger acquireCount = new AtomicInteger(0);
        final CountDownLatch suspendedLatch = new CountDownLatch(CLIENT_QTY);
        for (int i = 0; i < CLIENT_QTY; ++i) {
            completionService.submit(new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                    CuratorFramework client = CuratorFrameworkFactory.builder().ensembleProvider(provider)
                            .sessionTimeoutMs(timing.session()).connectionTimeoutMs(timing.connection())
                            .retryPolicy(new ExponentialBackoffRetry(100, 3)).build();
                    try {
                        final Semaphore suspendedSemaphore = new Semaphore(0);
                        client.getConnectionStateListenable().addListener(new ConnectionStateListener() {
                            @Override
                            public void stateChanged(CuratorFramework client, ConnectionState newState) {
                                if ((newState == ConnectionState.SUSPENDED)
                                        || (newState == ConnectionState.LOST)) {
                                    suspendedLatch.countDown();
                                    suspendedSemaphore.release();
                                }
                            }
                        });

                        client.start();

                        InterProcessSemaphoreV2 semaphore = new InterProcessSemaphoreV2(client, PATH, 1);

                        while (!Thread.currentThread().isInterrupted()) {
                            Lease lease = null;
                            try {
                                lease = semaphore.acquire();
                                acquiredSemaphore.release();
                                acquireCount.incrementAndGet();
                                suspendedSemaphore.acquire();
                            } catch (Exception e) {
                                // just retry
                            } finally {
                                if (lease != null) {
                                    acquireCount.decrementAndGet();
                                    IOUtils.closeQuietly(lease);
                                }
                            }
                        }
                    } finally {
                        IOUtils.closeQuietly(client);
                    }
                    return null;
                }
            });
        }

        Assert.assertTrue(timing.acquireSemaphore(acquiredSemaphore));
        Assert.assertEquals(1, acquireCount.get());

        cluster.close();
        timing.awaitLatch(suspendedLatch);
        timing.forWaiting().sleepABit();
        Assert.assertEquals(0, acquireCount.get());

        cluster = new TestingCluster(3);
        cluster.start();

        connectionString.set(cluster.getConnectString());
        timing.forWaiting().sleepABit();

        Assert.assertTrue(timing.acquireSemaphore(acquiredSemaphore));
        timing.forWaiting().sleepABit();
        Assert.assertEquals(1, acquireCount.get());
    } finally {
        executorService.shutdown();
        executorService.awaitTermination(10, TimeUnit.SECONDS);
        executorService.shutdownNow();
        IOUtils.closeQuietly(cluster);
    }
}
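
In the Curator test above every successful lease acquisition releases a shared zero-permit semaphore, and the assertions go through Curator's Timing helper so the wait is bounded rather than indefinite. A small sketch of counting asynchronous events that way; the thread pool and timeout values are illustrative only:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;

public class EventCountSketch {
    public static void main(String[] args) throws InterruptedException {
        int clients = 10;
        Semaphore acquiredEvents = new Semaphore(0);       // each release() records one acquisition
        ExecutorService pool = Executors.newFixedThreadPool(clients);

        for (int i = 0; i < clients; i++) {
            pool.execute(acquiredEvents::release);         // stand-in for "a client obtained the lease"
        }

        // Timed acquire instead of a bare acquire(): the check fails fast rather than blocking forever.
        boolean sawAtLeastOne = acquiredEvents.tryAcquire(5, TimeUnit.SECONDS);
        System.out.println("at least one acquisition observed: " + sawAtLeastOne);
        System.out.println("events still recorded: " + acquiredEvents.availablePermits());

        pool.shutdown();
        pool.awaitTermination(5, TimeUnit.SECONDS);
    }
}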