Example usage for java.util.concurrent Semaphore release

List of usage examples for java.util.concurrent Semaphore release

Introduction

On this page you can find example usage of java.util.concurrent.Semaphore.release().

Prototype

public void release() 

Document

Releases a permit, returning it to the semaphore.
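
A minimal sketch of the typical acquire/release handshake (the class and variable names below are illustrative, not taken from any of the examples on this page): a semaphore created with zero permits lets one thread block in acquire() until another thread hands it a permit with release().

import java.util.concurrent.Semaphore;

public class ReleaseHandshakeExample {

    public static void main(String[] args) throws InterruptedException {
        // Zero initial permits: acquire() blocks until someone calls release().
        final Semaphore ready = new Semaphore(0);

        Thread worker = new Thread(new Runnable() {
            public void run() {
                // ... perform some setup work ...
                ready.release(); // hand a permit to the waiting thread
            }
        });
        worker.start();

        ready.acquire(); // blocks until the worker releases a permit
        System.out.println("Worker signalled that setup is done");
    }
}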

Usage

From source file:org.springframework.integration.ip.tcp.TcpSendingMessageHandlerTests.java

@Test
public void testNioSingleUseWithInbound() throws Exception {
    final int port = SocketUtils.findAvailableServerSocket();
    final CountDownLatch latch = new CountDownLatch(1);
    final Semaphore semaphore = new Semaphore(0);
    final AtomicBoolean done = new AtomicBoolean();
    Executors.newSingleThreadExecutor().execute(new Runnable() {
        public void run() {
            try {
                ServerSocket server = ServerSocketFactory.getDefault().createServerSocket(port);
                latch.countDown();
                for (int i = 1; i < 3; i++) {
                    Socket socket = server.accept();
                    semaphore.release();
                    byte[] b = new byte[6];
                    readFully(socket.getInputStream(), b);
                    b = ("Reply" + i + "\r\n").getBytes();
                    socket.getOutputStream().write(b);
                    socket.close();
                }
                server.close();
            } catch (Exception e) {
                if (!done.get()) {
                    e.printStackTrace();
                }
            }
        }
    });
    AbstractConnectionFactory ccf = new TcpNioClientConnectionFactory("localhost", port);
    ByteArrayCrLfSerializer serializer = new ByteArrayCrLfSerializer();
    ccf.setSerializer(serializer);
    ccf.setDeserializer(serializer);
    ccf.setSoTimeout(10000);
    ccf.start();
    ccf.setSingleUse(true);
    TcpSendingMessageHandler handler = new TcpSendingMessageHandler();
    handler.setConnectionFactory(ccf);
    TcpReceivingChannelAdapter adapter = new TcpReceivingChannelAdapter();
    adapter.setConnectionFactory(ccf);
    QueueChannel channel = new QueueChannel();
    adapter.setOutputChannel(channel);
    assertTrue(latch.await(10, TimeUnit.SECONDS));
    handler.handleMessage(MessageBuilder.withPayload("Test").build());
    handler.handleMessage(MessageBuilder.withPayload("Test").build());
    assertTrue(semaphore.tryAcquire(2, 10000, TimeUnit.MILLISECONDS));
    Set<String> replies = new HashSet<String>();
    for (int i = 0; i < 2; i++) {
        Message<?> mOut = channel.receive(10000);
        assertNotNull(mOut);
        replies.add(new String((byte[]) mOut.getPayload()));
    }
    assertTrue(replies.remove("Reply1"));
    assertTrue(replies.remove("Reply2"));
    done.set(true);
    ccf.stop();
}

From source file:org.springframework.batch.core.jsr.launch.JsrJobOperator.java

/**
 * Creates a child {@link ApplicationContext} for the job being requested based upon
 * the /META-INF/batch.xml (if exists) and the /META-INF/batch-jobs/&lt;jobName&gt;.xml
 * configuration and launches the job.  Per JSR-352, calls to this method will always
 * create a new {@link JobInstance} (and related {@link JobExecution}).
 *
 * @param jobName the name of the job XML file without the .xml that is located within the
 * /META-INF/batch-jobs directory.
 * @param params any job parameters to be used during the execution of this job.
 */
@Override
public long start(String jobName, Properties params) throws JobStartException, JobSecurityException {
    final JsrXmlApplicationContext batchContext = new JsrXmlApplicationContext(params);
    batchContext.setValidating(false);

    Resource batchXml = new ClassPathResource("/META-INF/batch.xml");
    String jobConfigurationLocation = "/META-INF/batch-jobs/" + jobName + ".xml";
    Resource jobXml = new ClassPathResource(jobConfigurationLocation);

    if (batchXml.exists()) {
        batchContext.load(batchXml);
    }

    if (jobXml.exists()) {
        batchContext.load(jobXml);
    }

    AbstractBeanDefinition beanDefinition = BeanDefinitionBuilder
            .genericBeanDefinition("org.springframework.batch.core.jsr.JsrJobContextFactoryBean")
            .getBeanDefinition();
    beanDefinition.setScope(BeanDefinition.SCOPE_SINGLETON);
    batchContext.registerBeanDefinition(JSR_JOB_CONTEXT_BEAN_NAME, beanDefinition);

    if (baseContext != null) {
        batchContext.setParent(baseContext);
    } else {
        batchContext.getBeanFactory().registerSingleton("jobExplorer", jobExplorer);
        batchContext.getBeanFactory().registerSingleton("jobRepository", jobRepository);
        batchContext.getBeanFactory().registerSingleton("jobParametersConverter", jobParametersConverter);
        batchContext.getBeanFactory().registerSingleton("transactionManager", transactionManager);
    }

    try {
        batchContext.refresh();
    } catch (BeanCreationException e) {
        throw new JobStartException(e);
    }

    Assert.notNull(jobName, "The job name must not be null.");

    final org.springframework.batch.core.JobExecution jobExecution;

    try {
        JobParameters jobParameters = jobParametersConverter.getJobParameters(params);
        String[] jobNames = batchContext.getBeanNamesForType(Job.class);

        if (jobNames == null || jobNames.length <= 0) {
            throw new BatchRuntimeException("No Job defined in current context");
        }

        org.springframework.batch.core.JobInstance jobInstance = jobRepository.createJobInstance(jobNames[0],
                jobParameters);
        jobExecution = jobRepository.createJobExecution(jobInstance, jobParameters, jobConfigurationLocation);
    } catch (Exception e) {
        throw new JobStartException(e);
    }

    try {
        final Semaphore semaphore = new Semaphore(1);
        final List<Exception> exceptionHolder = Collections.synchronizedList(new ArrayList<Exception>());
        semaphore.acquire();

        taskExecutor.execute(new Runnable() {

            @Override
            public void run() {
                JsrJobContextFactoryBean factoryBean = null;
                try {
                    factoryBean = (JsrJobContextFactoryBean) batchContext
                            .getBean("&" + JSR_JOB_CONTEXT_BEAN_NAME);
                    factoryBean.setJobExecution(jobExecution);
                    final Job job = batchContext.getBean(Job.class);
                    semaphore.release();
                    // Initialization of the JobExecution for job level dependencies
                    jobRegistry.register(job, jobExecution);
                    job.execute(jobExecution);
                    jobRegistry.remove(jobExecution);
                } catch (Exception e) {
                    exceptionHolder.add(e);
                } finally {
                    if (factoryBean != null) {
                        factoryBean.close();
                    }

                    batchContext.close();

                    if (semaphore.availablePermits() == 0) {
                        semaphore.release();
                    }
                }
            }
        });

        semaphore.acquire();
        if (exceptionHolder.size() > 0) {
            semaphore.release();
            throw new JobStartException(exceptionHolder.get(0));
        }
    } catch (Exception e) {
        if (jobRegistry.exists(jobExecution.getId())) {
            jobRegistry.remove(jobExecution);
        }
        jobExecution.upgradeStatus(BatchStatus.FAILED);
        if (jobExecution.getExitStatus().equals(ExitStatus.UNKNOWN)) {
            jobExecution.setExitStatus(ExitStatus.FAILED.addExitDescription(e));
        }
        jobRepository.update(jobExecution);

        if (batchContext.isActive()) {
            batchContext.close();
        }

        throw new JobStartException(e);
    }
    return jobExecution.getId();
}

From source file:com.netflix.curator.framework.recipes.shared.TestSharedCount.java

@Test
public void testMultiClients() throws Exception {
    final int CLIENT_QTY = 5;

    List<Future<List<Integer>>> futures = Lists.newArrayList();
    final List<CuratorFramework> clients = new CopyOnWriteArrayList<CuratorFramework>();
    try {
        final CountDownLatch startLatch = new CountDownLatch(CLIENT_QTY);
        final Semaphore semaphore = new Semaphore(0);
        ExecutorService service = Executors
                .newCachedThreadPool(new ThreadFactoryBuilder().setNameFormat("Test-%d").build());
        for (int i = 0; i < CLIENT_QTY; ++i) {
            Future<List<Integer>> future = service.submit(new Callable<List<Integer>>() {
                @Override
                public List<Integer> call() throws Exception {
                    final List<Integer> countList = Lists.newArrayList();
                    CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(),
                            new RetryOneTime(1));
                    clients.add(client);
                    client.start();

                    SharedCount count = new SharedCount(client, "/count", 10);

                    final CountDownLatch latch = new CountDownLatch(1);
                    count.addListener(new SharedCountListener() {
                        @Override
                        public void countHasChanged(SharedCountReader sharedCount, int newCount)
                                throws Exception {
                            if (newCount < 0) {
                                latch.countDown();
                            } else {
                                countList.add(newCount);
                            }

                            semaphore.release();
                        }

                        @Override
                        public void stateChanged(CuratorFramework client, ConnectionState newState) {
                        }
                    });
                    count.start();
                    startLatch.countDown();
                    latch.await();
                    return countList;
                }
            });
            futures.add(future);
        }

        CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(),
                new RetryOneTime(1));
        clients.add(client);
        client.start();

        Assert.assertTrue(startLatch.await(10, TimeUnit.SECONDS));

        SharedCount count = new SharedCount(client, "/count", 10);
        count.start();

        List<Integer> countList = Lists.newArrayList();
        Random random = new Random();
        for (int i = 0; i < 100; ++i) {
            Thread.sleep(random.nextInt(10));

            int next = random.nextInt(100);
            countList.add(next);
            count.setCount(next);

            Assert.assertTrue(semaphore.tryAcquire(CLIENT_QTY, 10, TimeUnit.SECONDS));
        }
        count.setCount(-1);

        for (Future<List<Integer>> future : futures) {
            List<Integer> thisCountList = future.get();
            Assert.assertEquals(thisCountList, countList);
        }
    } finally {
        for (CuratorFramework client : clients) {
            IOUtils.closeQuietly(client);
        }
    }
}

From source file:io.pravega.client.stream.impl.ControllerImplTest.java

@Test
public void testParallelCreateStream() throws Exception {
    final ExecutorService executorService = Executors.newFixedThreadPool(10);
    Semaphore createCount = new Semaphore(-19);
    AtomicBoolean success = new AtomicBoolean(true);
    for (int i = 0; i < 10; i++) {
        executorService.submit(() -> {
            for (int j = 0; j < 2; j++) {
                try {
                    CompletableFuture<Boolean> createStreamStatus;
                    createStreamStatus = controllerClient
                            .createStream(StreamConfiguration.builder().streamName("streamparallel")
                                    .scope("scope1").scalingPolicy(ScalingPolicy.fixed(1)).build());
                    log.info("{}", createStreamStatus.get());
                    assertTrue(createStreamStatus.get());
                    createCount.release();
                } catch (Exception e) {
                    log.error("Exception when creating stream: {}", e);

                    // Don't wait for other threads to complete.
                    success.set(false);
                    createCount.release(20);
                }
            }
        });
    }
    createCount.acquire();
    executorService.shutdownNow();
    assertTrue(success.get());
}
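
The test above seeds the semaphore with negative permits (new Semaphore(-19)) so that a single acquire() only succeeds after twenty release() calls; the semaphore doubles as a completion latch, and the failure path releases the remaining permits at once to unblock the waiter early. A minimal sketch of that trick, with hypothetical names:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;

public class NegativePermitLatchExample {

    public static void main(String[] args) throws InterruptedException {
        final int tasks = 20;
        // Start at 1 - tasks (here -19): acquire() only succeeds once all
        // twenty tasks have each called release().
        final Semaphore done = new Semaphore(1 - tasks);

        ExecutorService pool = Executors.newFixedThreadPool(4);
        for (int i = 0; i < tasks; i++) {
            pool.submit(new Runnable() {
                public void run() {
                    // ... do some work ...
                    done.release(); // one permit per completed task
                }
            });
        }

        done.acquire(); // blocks until the permit count reaches one
        pool.shutdown();
        System.out.println("All tasks completed");
    }
}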

From source file:com.yahoo.pulsar.broker.service.ServerCnx.java

@Override
protected void handleLookup(CommandLookupTopic lookup) {
    final long requestId = lookup.getRequestId();
    final String topic = lookup.getTopic();
    if (log.isDebugEnabled()) {
        log.debug("[{}] Received Lookup from {} for {}", topic, remoteAddress, requestId);
    }
    final Semaphore lookupSemaphore = service.getLookupRequestSemaphore();
    if (lookupSemaphore.tryAcquire()) {
        lookupDestinationAsync(getBrokerService().pulsar(), DestinationName.get(topic),
                lookup.getAuthoritative(), getRole(), lookup.getRequestId()).handle((lookupResponse, ex) -> {
                    if (ex == null) {
                        ctx.writeAndFlush(lookupResponse);
                    } else {
                        // it should never happen
                        log.warn("[{}] lookup failed with error {}, {}", remoteAddress, topic, ex.getMessage(),
                                ex);
                        ctx.writeAndFlush(
                                newLookupResponse(ServerError.ServiceNotReady, ex.getMessage(), requestId));
                    }
                    lookupSemaphore.release();
                    return null;
                });
    } else {
        if (log.isDebugEnabled()) {
            log.debug("[{}] Failed lookup due to too many lookup-requets {}", remoteAddress, topic);
        }
        ctx.writeAndFlush(newLookupResponse(ServerError.TooManyRequests,
                "Failed due to too many pending lookup requests", requestId));
    }

}
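
Here the semaphore throttles concurrent lookups: tryAcquire() claims a permit without blocking, release() returns it once the asynchronous lookup completes, and requests that find no permit are rejected immediately. A simplified, synchronous sketch of that throttling pattern (class, method names, and the limit of eight are hypothetical):

import java.util.concurrent.Semaphore;

public class LookupThrottleExample {

    // Illustrative limit: at most eight lookups may be in flight at once.
    private final Semaphore lookupPermits = new Semaphore(8);

    public String handleLookup(String topic) {
        if (!lookupPermits.tryAcquire()) {
            // Reject immediately instead of queueing when the limit is reached.
            return "TooManyRequests";
        }
        try {
            return doLookup(topic);
        } finally {
            lookupPermits.release(); // free the slot for the next request
        }
    }

    private String doLookup(String topic) {
        return "resolved:" + topic; // stand-in for the real lookup
    }
}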

From source file:org.springframework.integration.ip.tcp.TcpSendingMessageHandlerTests.java

@Test
public void testNioSingleUseWithInboundMany() throws Exception {
    final int port = SocketUtils.findAvailableServerSocket();
    final CountDownLatch latch = new CountDownLatch(1);
    final Semaphore semaphore = new Semaphore(0);
    final AtomicBoolean done = new AtomicBoolean();
    final List<Socket> serverSockets = new ArrayList<Socket>();
    Executors.newSingleThreadExecutor().execute(new Runnable() {
        public void run() {
            try {
                ServerSocket server = ServerSocketFactory.getDefault().createServerSocket(port, 100);
                latch.countDown();
                for (int i = 0; i < 100; i++) {
                    Socket socket = server.accept();
                    serverSockets.add(socket);
                    semaphore.release();
                    byte[] b = new byte[9];
                    readFully(socket.getInputStream(), b);
                    b = ("Reply" + i + "\r\n").getBytes();
                    socket.getOutputStream().write(b);
                    socket.close();
                }
                server.close();
            } catch (Exception e) {
                if (!done.get()) {
                    e.printStackTrace();
                }
            }
        }
    });
    AbstractConnectionFactory ccf = new TcpNioClientConnectionFactory("localhost", port);
    ByteArrayCrLfSerializer serializer = new ByteArrayCrLfSerializer();
    ccf.setSerializer(serializer);
    ccf.setDeserializer(serializer);
    ccf.setSoTimeout(10000);
    ccf.setSingleUse(true);
    ccf.setTaskExecutor(Executors.newFixedThreadPool(100));
    ccf.start();
    TcpSendingMessageHandler handler = new TcpSendingMessageHandler();
    handler.setConnectionFactory(ccf);
    TcpReceivingChannelAdapter adapter = new TcpReceivingChannelAdapter();
    adapter.setConnectionFactory(ccf);
    QueueChannel channel = new QueueChannel();
    adapter.setOutputChannel(channel);
    assertTrue(latch.await(10, TimeUnit.SECONDS));
    int i = 0;
    try {
        for (i = 100; i < 200; i++) {
            handler.handleMessage(MessageBuilder.withPayload("Test" + i).build());
        }
    } catch (Exception e) {
        e.printStackTrace();
        fail("Exception at " + i);
    }
    assertTrue(semaphore.tryAcquire(100, 20000, TimeUnit.MILLISECONDS));
    Set<String> replies = new HashSet<String>();
    for (i = 100; i < 200; i++) {
        Message<?> mOut = channel.receive(20000);
        assertNotNull(mOut);
        replies.add(new String((byte[]) mOut.getPayload()));
    }
    for (i = 0; i < 100; i++) {
        assertTrue("Reply" + i + " missing", replies.remove("Reply" + i));
    }
    done.set(true);
    ccf.stop();
}

From source file:com.bt.sdk.callcontrol.sip.util.EhCacheCollectionImpl.java

@SuppressWarnings("unchecked")
public void replace(T info) {
    if (info == null)
        throw new IllegalArgumentException(String.format(
                "Trying to replace element in collection %s with null info", this.getClass().getSimpleName()));

    String infoId = info.getId();
    log.debug(String.format("InMemoryInfoCollection replacing %s", infoId));
    if (!semaphoreCache.getKeys().contains(infoId))
        throw new IllegalArgumentException(
                String.format("Trying to replace non-existing info %s in collection %s", infoId,
                        this.getClass().getSimpleName()));
    Semaphore semaphore = (Semaphore) semaphoreCache.get(infoId).getObjectValue();
    if (semaphore == null)
        throw new IllegalArgumentException(
                String.format("Trying to replace non-existing info %s in collection %s", infoId,
                        this.getClass().getSimpleName()));

    try {
        semaphore.acquire();
    } catch (InterruptedException e) {
        log.error(String.format(FAILED_TO_READ_OBJECT_MESSAGE, infoId, this.getClass().getSimpleName(),
                e.getMessage()), e);
        throw new CollectionAccessInterruptedException(String.format(FAILED_TO_READ_OBJECT_MESSAGE, infoId,
                this.getClass().getSimpleName(), e.getMessage()), e);
    }

    try {
        //T oldInfo = infos.get(infoId);
        T oldInfo = (T) cache.get(infoId).getObjectValue();
        if (!oldInfo.getVersionId().equals(info.getVersionId()))
            throw new ConcurrentUpdateException(infoId, String.format(
                    "Info %s modified in collection %s, try again", infoId, this.getClass().getSimpleName()));

        T newInfo = info.cloneObject();
        newInfo.updateVersionId();
        doExtraUpdates(info, newInfo);
        //infos.put(infoId, (T)newInfo);
        cache.put(new Element(infoId, newInfo));
        info.setVersionId(newInfo.getVersionId());
        log.debug(String.format("Replaced info %s, new version %s", infoId, newInfo.getVersionId()));
    } finally {
        semaphore.release();
    }
}
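
The replace method above uses a per-entry Semaphore(1) as a mutex and always returns the permit in a finally block, so a failed update cannot leave the entry locked. A minimal sketch of that acquire/try/finally shape, with hypothetical names:

import java.util.concurrent.Semaphore;

public class GuardedUpdateExample {

    // A single permit makes the semaphore behave like a mutex for the guarded state.
    private final Semaphore lock = new Semaphore(1);
    private int value;

    public void update(int newValue) throws InterruptedException {
        lock.acquire();
        try {
            // Critical section: only one thread mutates the state at a time.
            value = newValue;
        } finally {
            lock.release(); // always return the permit, even if the update throws
        }
    }
}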

From source file:io.pravega.client.stream.impl.ControllerImplTest.java

@Test
public void testParallelGetCurrentSegments() throws Exception {
    final ExecutorService executorService = Executors.newFixedThreadPool(10);
    Semaphore createCount = new Semaphore(-19);
    AtomicBoolean success = new AtomicBoolean(true);
    for (int i = 0; i < 10; i++) {
        executorService.submit(() -> {
            for (int j = 0; j < 2; j++) {
                try {
                    CompletableFuture<StreamSegments> streamSegments;
                    streamSegments = controllerClient.getCurrentSegments("scope1", "streamparallel");
                    assertTrue(streamSegments.get().getSegments().size() == 2);
                    assertEquals(new Segment("scope1", "streamparallel", 0),
                            streamSegments.get().getSegmentForKey(0.2));
                    assertEquals(new Segment("scope1", "streamparallel", 1),
                            streamSegments.get().getSegmentForKey(0.6));
                    createCount.release();
                } catch (Exception e) {
                    log.error("Exception when getting segments: {}", e);

                    // Don't wait for other threads to complete.
                    success.set(false);
                    createCount.release(20);
                }
            }
        });
    }
    createCount.acquire();
    executorService.shutdownNow();
    assertTrue(success.get());
}

From source file:org.kuali.mobility.icons.service.IconsServiceImpl.java

/**
 * Creates the required file on the filesystem
 *
 * @param file File to write to
 * @param icon WebIcon that we need to create a file of
 * @param size the size of the icon (this size must have the multiplier applied already)
 * @return the created file
 */
private File createFile(final File file, WebIcon icon, int size) {
    Semaphore oneToAdd = new Semaphore(1);
    /*
     * If a semaphore already exists for this filename, putIfAbsent returns the existing one;
     * otherwise the new semaphore is added and null is returned (because there is no old value).
     */
    Semaphore sema = lockingMap.putIfAbsent(file.getName(), oneToAdd);
    if (sema == null) {
        sema = oneToAdd;
    }
    try {

        sema.acquire(); // Second concurrent user will wait here
        if (imageFileExists(file)) {
            return file;
        }
        file.createNewFile();

        Resource iconResource = applicationContext.getResource(icon.getPath());
        PNGTranscoder t = new PNGTranscoder();
        t.addTranscodingHint(PNGTranscoder.KEY_MAX_HEIGHT, new Float(size));
        t.addTranscodingHint(PNGTranscoder.KEY_MAX_WIDTH, new Float(size));
        t.addTranscodingHint(PNGTranscoder.KEY_BACKGROUND_COLOR, new Color(0, 0, 0, 0));
        OutputStream ostream = new FileOutputStream(file);

        // Create the transcoder input.
        TranscoderInput input = new TranscoderInput(iconResource.getInputStream());

        // Create the transcoder output.
        TranscoderOutput output = new TranscoderOutput(ostream);

        // Save the image.
        t.transcode(input, output);

        // Flush and close the stream.
        ostream.flush();
        ostream.close();
    } catch (Exception ex) {
        LOG.warn("Exception while creating file", ex);
        if (file.exists()) {
            file.delete();
        }
    } finally {
        sema.release();
    }
    return file;
}

From source file:com.amazonaws.services.sqs.buffered.SendQueueBuffer.java

/**
 * Submits an outbound request for delivery to the queue associated with
 * this buffer.
 * <p>
 *
 * @param operationLock
 *            the lock synchronizing calls for the call type (
 *            {@code sendMessage}, {@code deleteMessage},
 *            {@code changeMessageVisibility} )
 * @param openOutboundBatchTask
 *            the open batch task for this call type
 * @param request
 *            the request to submit
 * @param inflightOperationBatches
 *            the permits controlling the batches for this type of request
 * @return never null
 * @throws AmazonClientException
 *             (see the various outbound calls for details)
 */
@SuppressWarnings("unchecked")
<OBT extends OutboundBatchTask<R, Result>, R extends AmazonWebServiceRequest, Result> QueueBufferFuture<R, Result> submitOutboundRequest(
        Object operationLock, OBT[] openOutboundBatchTask, R request, final Semaphore inflightOperationBatches,
        QueueBufferCallback<R, Result> callback) {
    /*
     * Callers add requests to a single batch task (openOutboundBatchTask)
     * until it is full or maxBatchOpenMs elapses. The total number of batch
     * tasks in flight is controlled by the inflightOperationBatches semaphore,
     * capped at maxInflightOutboundBatches.
     */
    QueueBufferFuture<R, Result> theFuture = null;
    try {
        synchronized (operationLock) {
            if (openOutboundBatchTask[0] == null
                    || ((theFuture = openOutboundBatchTask[0].addRequest(request, callback))) == null) {
                OBT obt = (OBT) newOutboundBatchTask(request);
                inflightOperationBatches.acquire();
                openOutboundBatchTask[0] = obt;
                // Register a listener for the event signaling that the
                // batch task has completed (successfully or not).
                openOutboundBatchTask[0].onCompleted = new Listener<OutboundBatchTask<R, Result>>() {
                    public void invoke(OutboundBatchTask<R, Result> task) {
                        inflightOperationBatches.release();
                    }
                };

                if (log.isTraceEnabled()) {
                    log.trace("Queue " + qUrl + " created new batch for " + request.getClass().toString() + " "
                            + inflightOperationBatches.availablePermits() + " free slots remain");
                }

                theFuture = openOutboundBatchTask[0].addRequest(request, callback);
                executor.execute(openOutboundBatchTask[0]);
                if (null == theFuture) {
                    //this can happen only if the request itself is flawed,
                    //so that it can't be added to any batch, even a brand
                    //new one
                    throw new AmazonClientException("Failed to schedule request " + request + " for execution");
                }
            }
        }

    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        AmazonClientException toThrow = new AmazonClientException("Interrupted while waiting for lock.");
        toThrow.initCause(e);
        throw toThrow;
    }

    return theFuture;
}
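
In the method above, the inflightOperationBatches semaphore bounds how many batch tasks may run concurrently: a permit is acquired before a new batch is opened and released from the completion listener. A minimal sketch of that bounded-concurrency pattern (the names and the limit of four are illustrative):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;

public class BoundedInflightExample {

    private static final int MAX_IN_FLIGHT = 4;
    private final Semaphore inFlight = new Semaphore(MAX_IN_FLIGHT);
    private final ExecutorService executor = Executors.newCachedThreadPool();

    public void submit(final Runnable work) throws InterruptedException {
        inFlight.acquire(); // blocks when MAX_IN_FLIGHT tasks are already running
        executor.execute(new Runnable() {
            public void run() {
                try {
                    work.run();
                } finally {
                    inFlight.release(); // free the slot when the task completes
                }
            }
        });
    }
}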