Example usage for java.util.concurrent Semaphore Semaphore(int permits)

Introduction

On this page you can find usage examples for the java.util.concurrent Semaphore constructor Semaphore(int permits), collected from real-world projects.

Prototype

public Semaphore(int permits) 

Documentation

Creates a Semaphore with the given number of permits and nonfair fairness setting.
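
Before the project examples, here is a minimal, runnable sketch of what the constructor gives you: the semaphore starts with exactly the permits you pass in, and because the setting is nonfair, waiting threads may acquire permits in any order.

import java.util.concurrent.Semaphore;

public class SemaphoreBasics {
    public static void main(String[] args) throws InterruptedException {
        Semaphore sem = new Semaphore(2);      // two permits, nonfair by default
        sem.acquire();                         // one permit left
        sem.acquire();                         // zero permits left
        System.out.println(sem.tryAcquire()); // false: no permits remain
        sem.release();                         // hand a permit back
        System.out.println(sem.tryAcquire()); // true: a permit is available again
    }
}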

Usage

From source file:org.apache.hadoop.hdfs.client.impl.TestBlockReaderFactory.java

/**
 * When an InterruptedException is sent to a thread calling
 * FileChannel#read, the FileChannel is immediately closed and the
 * thread gets an exception.  This effectively means that we might have
 * someone asynchronously calling close() on the file descriptors we use
 * in BlockReaderLocal.  So when unreferencing a ShortCircuitReplica in
 * ShortCircuitCache#unref, we should check if the FileChannel objects
 * are still open.  If not, we should purge the replica to avoid giving
 * it out to any future readers.
 *
 * This is a regression test for HDFS-6227: Short circuit read failed
 * due to ClosedChannelException.
 *
 * Note that you may still get ClosedChannelException errors if two threads
 * are reading from the same replica and an InterruptedException is delivered
 * to one of them.
 */
@Test(timeout = 120000)
public void testPurgingClosedReplicas() throws Exception {
    BlockReaderTestUtil.enableBlockReaderFactoryTracing();
    final AtomicInteger replicasCreated = new AtomicInteger(0);
    final AtomicBoolean testFailed = new AtomicBoolean(false);
    DFSInputStream.tcpReadsDisabledForTesting = true;
    BlockReaderFactory.createShortCircuitReplicaInfoCallback = new ShortCircuitCache.ShortCircuitReplicaCreator() {
        @Override
        public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
            replicasCreated.incrementAndGet();
            return null;
        }
    };
    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
    Configuration conf = createShortCircuitConf("testPurgingClosedReplicas", sockDir);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 4095;
    final int SEED = 0xFADE0;
    final DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(cluster.getURI(0), conf);
    DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);

    final Semaphore sem = new Semaphore(0);
    final List<LocatedBlock> locatedBlocks = cluster.getNameNode().getRpcServer()
            .getBlockLocations(TEST_FILE, 0, TEST_FILE_LEN).getLocatedBlocks();
    final LocatedBlock lblock = locatedBlocks.get(0); // first block
    final byte[] buf = new byte[TEST_FILE_LEN];
    Runnable readerRunnable = new Runnable() {
        @Override
        public void run() {
            try {
                while (true) {
                    BlockReader blockReader = null;
                    try {
                        blockReader = BlockReaderTestUtil.getBlockReader(cluster.getFileSystem(), lblock, 0,
                                TEST_FILE_LEN);
                        sem.release();
                        try {
                            blockReader.readAll(buf, 0, TEST_FILE_LEN);
                        } finally {
                            sem.acquireUninterruptibly();
                        }
                    } catch (ClosedByInterruptException e) {
                        LOG.info("got the expected ClosedByInterruptException", e);
                        sem.release();
                        break;
                    } finally {
                        if (blockReader != null)
                            blockReader.close();
                    }
                    LOG.info("read another " + TEST_FILE_LEN + " bytes.");
                }
            } catch (Throwable t) {
                LOG.error("getBlockReader failure", t);
                testFailed.set(true);
                sem.release();
            }
        }
    };
    Thread thread = new Thread(readerRunnable);
    thread.start();

    // While the thread is reading, send it interrupts.
    // These should trigger a ClosedChannelException.
    while (thread.isAlive()) {
        sem.acquireUninterruptibly();
        thread.interrupt();
        sem.release();
    }
    Assert.assertFalse(testFailed.get());

    // We should be able to read from the file without
    // getting a ClosedChannelException.
    BlockReader blockReader = null;
    try {
        blockReader = BlockReaderTestUtil.getBlockReader(cluster.getFileSystem(), lblock, 0, TEST_FILE_LEN);
        blockReader.readFully(buf, 0, TEST_FILE_LEN);
    } finally {
        if (blockReader != null)
            blockReader.close();
    }
    byte expected[] = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
    Assert.assertTrue(Arrays.equals(buf, expected));

    // Another ShortCircuitReplica object should have been created.
    Assert.assertEquals(2, replicasCreated.get());

    dfs.close();
    cluster.shutdown();
    sockDir.close();
}
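
The zero-permit semaphore above is a handshake, not a lock: the reader releases a permit just before it blocks in the read, so the interrupting loop can acquire that permit and know its interrupt will land inside FileChannel#read. A stripped-down sketch of the same handshake (the sleep stands in for the blocking read; names are illustrative, not from the test):

import java.util.concurrent.Semaphore;

public class InterruptHandshake {
    public static void main(String[] args) throws InterruptedException {
        final Semaphore inBlockingCall = new Semaphore(0);  // no permits until the reader signals
        Thread reader = new Thread(() -> {
            try {
                inBlockingCall.release();   // signal: about to enter the blocking call
                Thread.sleep(60_000);       // stands in for FileChannel#read
            } catch (InterruptedException e) {
                System.out.println("interrupted inside the blocking call, as intended");
            }
        });
        reader.start();
        inBlockingCall.acquire();           // wait until the reader has signalled
        reader.interrupt();                 // the interrupt lands in the blocking call
        reader.join();
    }
}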

From source file:org.opcfoundation.ua.transport.https.HttpsServer.java

@Override
public EndpointHandle bind(SocketAddress socketAddress, EndpointBinding endpointBinding)
        throws ServiceResultException {
    if (endpointBinding == null || socketAddress == null || endpointBinding.endpointServer != this)
        throw new IllegalArgumentException();
    String url = endpointBinding.endpointAddress.getEndpointUrl();

    // Start endpoint handler
    {
        String endpointId = url;
        endpointId = UriUtil.getEndpointName(url);
        if (endpointId == null)
            endpointId = "";
        //         else endpointId = "*"+endpointId;
        HttpAsyncRequestHandler<?> oldEndpointHandler = registry.lookup(endpointId);
        if (oldEndpointHandler == null) {
            HttpsServerEndpointHandler endpointHandler = new HttpsServerEndpointHandler(endpointBinding);
            registry.register(endpointId, endpointHandler);
            registry.register("", discoveryHandler);
        } else {
            HttpsServerEndpointHandler oldEndpointHandler2 = (HttpsServerEndpointHandler) oldEndpointHandler;
            if (oldEndpointHandler2.endpointServer != endpointBinding.endpointServer) {
                throw new ServiceResultException(StatusCodes.Bad_UnexpectedError,
                        "Cannot bind endpoint " + url + " and "
                                + oldEndpointHandler2.endpointBinding.endpointAddress.getEndpointUrl()
                                + " with two different sets of services.");
            }
            }
        }
    }

    // Make socket handle and endpoint handle
    String scheme = UriUtil.getTransportProtocol(endpointBinding.endpointAddress.getEndpointUrl());
    SocketHandle socketHandle = getOrCreateSocketHandle(socketAddress, scheme);

    HttpsEndpointHandle endpointHandle = socketHandle.getOrCreate(endpointBinding);

    try {
        // Shutdown reactor
        shutdownReactor();
        // Create reactor
        initReactor();

        // Bind to listen the given ports
        for (SocketHandle sh : socketHandleSnapshot()) {
            if (sh.listenerEndpoint == null) {
                sh.listenerEndpoint = ioReactor.listen(sh.getSocketAddress());
            }
        }

        // Start reactor threads
        if (UriUtil.SCHEME_HTTPS.equals(scheme)) {
            if (sslReactorThread == null || !sslReactorThread.isAlive()) {
                final IOReactor r = ioReactor;
                final Semaphore s = sslThreadSemaphore = new Semaphore(0);
                sslReactorThread = new Thread() {
                    public void run() {
                        try {
                            setState(CloseableObjectState.Open);
                            r.execute(sslIoEventDispatch);
                        } catch (IOException e) {
                            HttpsServer.this.setError(new ServiceResultException(e));
                        } finally {
                            s.release(9999);
                        }
                    };
                };
                if (!getState().isOpen())
                    setState(CloseableObjectState.Opening);
                sslReactorThread.start();
            }
        }

        if (UriUtil.SCHEME_HTTP.equals(scheme)) {
            if (plainReactorThread == null || !plainReactorThread.isAlive()) {
                final IOReactor r = ioReactor;
                final Semaphore s = plainThreadSemaphore = new Semaphore(0);
                plainReactorThread = new Thread() {
                    public void run() {
                        try {
                            setState(CloseableObjectState.Open);
                            r.execute(plainIoEventDispatch);
                        } catch (IOException e) {
                            HttpsServer.this.setError(new ServiceResultException(e));
                        } finally {
                            s.release(9999);
                        }
                    };
                };
                if (!getState().isOpen())
                    setState(CloseableObjectState.Opening);
                plainReactorThread.start();
            }
        }

    } catch (ServiceResultException e) {
        endpointHandle.close();
        throw e;
    }
    log.info("Endpoint bound to {}", url);
    return endpointHandle;
}
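
Note the s.release(9999) in both reactor threads: releasing far more permits than any caller will ever acquire turns the zero-permit semaphore into a one-shot completion flag that any number of threads can wait on. A hedged sketch of that idiom:

import java.util.concurrent.Semaphore;

public class OneShotDoneFlag {
    public static void main(String[] args) throws InterruptedException {
        final Semaphore done = new Semaphore(0);
        new Thread(() -> {
            try {
                // ... long-running work (a reactor loop, in the source) would run here ...
            } finally {
                done.release(9999);   // enough permits for every waiter, as in the source
            }
        }).start();
        done.acquire();   // each waiting thread takes one permit; all proceed once work ends
        System.out.println("worker finished");
    }
}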

From source file:org.commoncrawl.util.ArcFileWriter.java

/** Unit test constructor. */
public ArcFileWriter() throws IOException {

    if (CrawlEnvironment.getHadoopConfig() == null) {
        Configuration conf = new Configuration();

        conf.addResource("commoncrawl-default.xml");
        conf.addResource("commoncrawl-site.xml");

        CrawlEnvironment.setHadoopConfig(conf);
    }

    _fileSystem = CrawlEnvironment.getDefaultFileSystem();
    _outputPath = new Path("crawl/test");
    _id = 1;
    _maxWritersSemaphore = new Semaphore(_maxWriters);
    rotateFile();
}

From source file:com.microsoft.intellij.helpers.IDEHelperImpl.java

@org.jetbrains.annotations.NotNull
private static Task.Backgroundable getCancellableBackgroundTask(final Project project,
        @NotNull final String name, @Nullable final String indicatorText,
        final CancellableTaskHandleImpl handle, @NotNull final CancellableTask cancellableTask) {
    return new Task.Backgroundable(project, name, true) {
        private final Semaphore lock = new Semaphore(0);

        @Override
        public void run(@org.jetbrains.annotations.NotNull ProgressIndicator indicator) {
            indicator.setIndeterminate(true);

            handle.setProgressIndicator(indicator);

            if (indicatorText != null) {
                indicator.setText(indicatorText);
            }

            ApplicationManager.getApplication().executeOnPooledThread(new Runnable() {
                @Override
                public void run() {
                    try {
                        cancellableTask.run(handle);
                    } catch (Throwable t) {
                        handle.setException(t);
                    } finally {
                        lock.release();
                    }
                }
            });

            try {
                while (!lock.tryAcquire(1, TimeUnit.SECONDS)) {
                    if (handle.isCancelled()) {
                        ApplicationManager.getApplication().executeOnPooledThread(new Runnable() {
                            @Override
                            public void run() {
                                cancellableTask.onCancel();
                            }
                        });

                        return;
                    }
                }

                ApplicationManager.getApplication().executeOnPooledThread(new Runnable() {
                    @Override
                    public void run() {
                        if (handle.getException() == null) {
                            cancellableTask.onSuccess();
                        } else {
                            cancellableTask.onError(handle.getException());
                        }
                    }
                });
            } catch (InterruptedException ignored) {
            }
        }
    };
}
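
The polling loop above is the key idiom: tryAcquire(1, TimeUnit.SECONDS) wakes the waiter once a second so it can check for cancellation instead of blocking indefinitely on acquire(). A self-contained sketch, with an AtomicBoolean standing in for handle.isCancelled():

import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

public class CancellableWait {
    public static void main(String[] args) throws InterruptedException {
        final Semaphore finished = new Semaphore(0);
        final AtomicBoolean cancelled = new AtomicBoolean(false);  // stands in for handle.isCancelled()
        new Thread(() -> {
            try {
                Thread.sleep(3_000);   // stands in for the real task
            } catch (InterruptedException ignored) {
            } finally {
                finished.release();    // signal completion
            }
        }).start();
        while (!finished.tryAcquire(1, TimeUnit.SECONDS)) {  // poll once a second
            if (cancelled.get()) {
                System.out.println("cancelled before completion");
                return;
            }
        }
        System.out.println("task completed");
    }
}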

From source file:com.netflix.curator.framework.recipes.cache.TestPathChildrenCache.java

@Test
public void testRebuildNode() throws Exception {
    PathChildrenCache cache = null;
    CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), new RetryOneTime(1));
    client.start();
    try {
        client.create().creatingParentsIfNeeded().forPath("/test/one", "one".getBytes());

        final CountDownLatch latch = new CountDownLatch(1);
        final AtomicInteger counter = new AtomicInteger();
        final Semaphore semaphore = new Semaphore(1);
        cache = new PathChildrenCache(client, "/test", true) {
            @Override
            void getDataAndStat(String fullPath) throws Exception {
                semaphore.acquire();
                counter.incrementAndGet();
                super.getDataAndStat(fullPath);
                latch.countDown();
            }
        };
        cache.start(PathChildrenCache.StartMode.BUILD_INITIAL_CACHE);

        latch.await();

        int saveCounter = counter.get();
        client.setData().forPath("/test/one", "alt".getBytes());
        cache.rebuildNode("/test/one");
        Assert.assertEquals(cache.getCurrentData("/test/one").getData(), "alt".getBytes());
        Assert.assertEquals(saveCounter, counter.get());

        semaphore.release(1000);
    } finally {
        IOUtils.closeQuietly(cache);
        IOUtils.closeQuietly(client);
    }
}
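
Here the single permit acts as a gate: each getDataAndStat callback must acquire it, so the test controls how far the cache may run, and the final semaphore.release(1000) floods the gate so any callbacks still pending can complete. A compact sketch of gating background callbacks this way (names are illustrative):

import java.util.concurrent.Semaphore;

public class CallbackGate {
    static final Semaphore gate = new Semaphore(1);   // one callback may pass initially

    static void onEvent(String path) throws InterruptedException {
        gate.acquire();                // blocks until the controller hands out a permit
        System.out.println("processing " + path);
    }

    public static void main(String[] args) throws InterruptedException {
        new Thread(() -> {
            try { onEvent("/test/one"); onEvent("/test/two"); }
            catch (InterruptedException ignored) {}
        }).start();
        Thread.sleep(100);      // let the first callback consume the initial permit
        gate.release(1000);     // open the gate for everything still pending
    }
}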

From source file:org.commoncrawl.util.ArcFileWriter.java

/**
 * Constructor for an ArcFileWriter.
 * 
 * @throws IOException
 */
public ArcFileWriter(FileSystem fileSystem, Path outputPath, int writerId, int maxSimultaneousWriters)
        throws IOException {

    _fileSystem = fileSystem;
    _outputPath = outputPath;
    _id = writerId;
    _maxWriters = maxSimultaneousWriters;
    _maxWritersSemaphore = new Semaphore(_maxWriters);

    // set up the initial arc file .
    rotateFile();
}
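
Both ArcFileWriter constructors size the semaphore to the writer limit, so a permit must be held for each concurrent write and at most _maxWriters writes can ever be in flight. A minimal sketch of that bounded-concurrency pattern, with a hypothetical write method standing in for the real I/O:

import java.util.concurrent.Semaphore;

public class BoundedWriters {
    private static final int MAX_WRITERS = 4;
    private static final Semaphore writers = new Semaphore(MAX_WRITERS);

    static void write(String record) throws InterruptedException {
        writers.acquire();           // block if MAX_WRITERS writes are already in flight
        try {
            System.out.println("writing " + record);  // stands in for the real write
        } finally {
            writers.release();       // always return the permit
        }
    }
}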

From source file:org.springframework.batch.core.jsr.launch.JsrJobOperator.java

/**
 * Creates a child {@link ApplicationContext} for the job being requested based upon
 * the /META-INF/batch.xml (if it exists) and the /META-INF/batch-jobs/&lt;jobName&gt;.xml
 * configuration, and restarts the job.
 *
 * @param executionId the database id of the job execution to be restarted.
 * @param params any job parameters to be used during the execution of this job.
 * @throws JobExecutionAlreadyCompleteException thrown if the requested job execution has
 * a status of COMPLETE
 * @throws NoSuchJobExecutionException throw if the requested job execution does not exist
 * in the repository
 * @throws JobExecutionNotMostRecentException thrown if the requested job execution is not
 * the most recent attempt for the job instance it's related to.
 * @throws JobRestartException thrown for any general errors during the job restart process
 */
@Override
public long restart(long executionId, Properties params)
        throws JobExecutionAlreadyCompleteException, NoSuchJobExecutionException,
        JobExecutionNotMostRecentException, JobRestartException, JobSecurityException {
    org.springframework.batch.core.JobExecution previousJobExecution = jobExplorer.getJobExecution(executionId);

    if (previousJobExecution == null) {
        throw new NoSuchJobExecutionException("No JobExecution found for id: [" + executionId + "]");
    } else if (previousJobExecution.getStatus().equals(BatchStatus.COMPLETED)) {
        throw new JobExecutionAlreadyCompleteException("The requested job has already completed");
    }

    List<org.springframework.batch.core.JobExecution> previousExecutions = jobExplorer
            .getJobExecutions(previousJobExecution.getJobInstance());

    for (org.springframework.batch.core.JobExecution jobExecution : previousExecutions) {
        if (jobExecution.getCreateTime().compareTo(previousJobExecution.getCreateTime()) > 0) {
            throw new JobExecutionNotMostRecentException(
                    "The requested JobExecution to restart was not the most recently run");
        }

        if (jobExecution.getStatus().equals(BatchStatus.ABANDONED)) {
            throw new JobRestartException("JobExecution ID: " + jobExecution.getId()
                    + " is abandoned and attempted to be restarted.");
        }
    }

    final String jobName = previousJobExecution.getJobInstance().getJobName();

    Properties jobRestartProperties = getJobRestartProperties(params, previousJobExecution);

    final JsrXmlApplicationContext batchContext = new JsrXmlApplicationContext(jobRestartProperties);
    batchContext.setValidating(false);

    Resource batchXml = new ClassPathResource("/META-INF/batch.xml");
    Resource jobXml = new ClassPathResource(previousJobExecution.getJobConfigurationName());

    if (batchXml.exists()) {
        batchContext.load(batchXml);
    }

    if (jobXml.exists()) {
        batchContext.load(jobXml);
    }

    AbstractBeanDefinition beanDefinition = BeanDefinitionBuilder
            .genericBeanDefinition("org.springframework.batch.core.jsr.JsrJobContextFactoryBean")
            .getBeanDefinition();
    beanDefinition.setScope(BeanDefinition.SCOPE_SINGLETON);
    batchContext.registerBeanDefinition(JSR_JOB_CONTEXT_BEAN_NAME, beanDefinition);

    batchContext.setParent(baseContext);

    try {
        batchContext.refresh();
    } catch (BeanCreationException e) {
        throw new JobRestartException(e);
    }

    final org.springframework.batch.core.JobExecution jobExecution;

    try {
        JobParameters jobParameters = jobParametersConverter.getJobParameters(jobRestartProperties);
        jobExecution = jobRepository.createJobExecution(previousJobExecution.getJobInstance(), jobParameters,
                previousJobExecution.getJobConfigurationName());
    } catch (Exception e) {
        throw new JobRestartException(e);
    }

    try {
        final Semaphore semaphore = new Semaphore(1);
        final List<Exception> exceptionHolder = Collections.synchronizedList(new ArrayList<Exception>());
        semaphore.acquire();

        taskExecutor.execute(new Runnable() {

            @Override
            public void run() {
                JsrJobContextFactoryBean factoryBean = null;
                try {
                    factoryBean = (JsrJobContextFactoryBean) batchContext
                            .getBean("&" + JSR_JOB_CONTEXT_BEAN_NAME);
                    factoryBean.setJobExecution(jobExecution);
                    final Job job = batchContext.getBean(Job.class);

                    if (!job.isRestartable()) {
                        throw new JobRestartException("Job " + jobName + " is not restartable");
                    }

                    semaphore.release();
                    // Initialization of the JobExecution for job level dependencies
                    jobRegistry.register(job, jobExecution);
                    job.execute(jobExecution);
                    jobRegistry.remove(jobExecution);
                } catch (Exception e) {
                    exceptionHolder.add(e);
                } finally {
                    if (factoryBean != null) {
                        factoryBean.close();
                    }

                    batchContext.close();

                    if (semaphore.availablePermits() == 0) {
                        semaphore.release();
                    }
                }
            }
        });

        semaphore.acquire();
        if (exceptionHolder.size() > 0) {
            semaphore.release();
            throw new JobRestartException(exceptionHolder.get(0));
        }
    } catch (Exception e) {
        jobExecution.upgradeStatus(BatchStatus.FAILED);
        if (jobExecution.getExitStatus().equals(ExitStatus.UNKNOWN)) {
            jobExecution.setExitStatus(ExitStatus.FAILED.addExitDescription(e));
        }

        jobRepository.update(jobExecution);

        if (batchContext.isActive()) {
            batchContext.close();
        }

        throw new JobRestartException(e);
    }

    return jobExecution.getId();
}
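
The Semaphore(1) here implements a startup handshake: the caller drains the single permit, the job thread returns it once initialization has succeeded, and the finally block's availablePermits() check guarantees the permit comes back even on failure, so the caller's second acquire() blocks only until the job is genuinely under way. A stripped-down sketch:

import java.util.concurrent.Semaphore;

public class StartupHandshake {
    public static void main(String[] args) throws InterruptedException {
        final Semaphore ready = new Semaphore(1);
        ready.acquire();                         // drain the permit up front
        new Thread(() -> {
            try {
                // ... build the context, validate the job ...
            } finally {
                if (ready.availablePermits() == 0) {
                    ready.release();             // hand the permit back exactly once
                }
            }
        }).start();
        ready.acquire();                         // blocks until initialization is done
        System.out.println("job is initialized and running");
    }
}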

From source file:com.arpnetworking.metrics.impl.ApacheHttpSinkTest.java

@Test
public void testPostFailure() throws InterruptedException {
    _wireMockRule.stubFor(WireMock.requestMatching(new RequestValueMatcher(r -> {
        // Annotations
        Assert.assertEquals(0, r.getAnnotationsCount());

        // Dimensions
        Assert.assertEquals(0, r.getDimensionsCount());

        // Samples
        Assert.assertEquals(0, r.getTimersCount());
        Assert.assertEquals(0, r.getCountersCount());
        Assert.assertEquals(0, r.getGaugesCount());
    })).willReturn(WireMock.aResponse().withStatus(400)));

    final AtomicBoolean assertionResult = new AtomicBoolean(false);
    final Semaphore semaphore = new Semaphore(0);
    final org.slf4j.Logger logger = Mockito.mock(org.slf4j.Logger.class);
    final Sink sink = new ApacheHttpSink(
            new ApacheHttpSink.Builder().setUri(URI.create("http://localhost:" + _wireMockRule.port() + PATH))
                    .setEventHandler(new AttemptCompletedAssertionHandler(assertionResult, 1, 2, false,
                            new CompletionHandler(semaphore))),
            logger);

    final TsdEvent event = new TsdEvent(ANNOTATIONS, TEST_EMPTY_SERIALIZATION_TIMERS,
            TEST_EMPTY_SERIALIZATION_COUNTERS, TEST_EMPTY_SERIALIZATION_GAUGES);

    sink.record(event);
    semaphore.acquire();

    // Ensure expected handler was invoked
    Assert.assertTrue(assertionResult.get());

    // Request matcher
    final RequestPatternBuilder requestPattern = WireMock.postRequestedFor(WireMock.urlEqualTo(PATH))
            .withHeader("Content-Type", WireMock.equalTo("application/octet-stream"));

    // Assert that data was sent
    _wireMockRule.verify(1, requestPattern);
    Assert.assertTrue(_wireMockRule.findUnmatchedRequests().getRequests().isEmpty());

    // Assert that an IOException was captured
    Mockito.verify(logger)
            .error(Mockito.startsWith("Received failure response when sending metrics to HTTP endpoint; uri="));
}

From source file:com.impetus.ankush2.ganglia.GangliaDeployer.java

/**
 * Validates the given nodes asynchronously and blocks until every
 * validation task has completed.
 *
 * @param nodeList
 *            {@link Collection} of host names to validate
 * @return <code>true</code>, if successful
 */
private boolean validate(Collection<String> nodeList) throws AnkushException {
    try {
        // Create semaphore to join threads
        final Semaphore semaphore = new Semaphore(nodeList.size());
        for (final String host : nodeList) {
            final NodeConfig nodeConf = clusterConf.getNodes().get(host);
            semaphore.acquire();
            AppStoreWrapper.getExecutor().execute(new Runnable() {
                @Override
                public void run() {
                    try {
                        nodeConf.setStatus(new GangliaValidator(clusterConf, nodeConf).validate());
                    } catch (AnkushException e) {
                        addClusterError(e.getMessage(), host, e);
                    } catch (Exception e) {
                        addClusterError("There is some exception while validating " + host + " for "
                                + getComponentName() + " deployment. " + GangliaConstants.EXCEPTION_STRING,
                                host, e);
                    } finally {
                        if (semaphore != null) {
                            semaphore.release();
                        }
                    }
                }
            });
        }
        semaphore.acquire(nodeList.size());

    } catch (Exception e) {
        throw new AnkushException("There is some exception while validating nodes for " + getComponentName()
                + " deployment." + GangliaConstants.EXCEPTION_STRING, e);
    }
    return AnkushUtils.getStatus(clusterConf, nodeList);
}
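
This last example uses the semaphore as a join: it starts with one permit per node, takes a permit before launching each validation task, has every task return its permit in a finally block, and then acquire(nodeList.size()) blocks until all tasks have finished. A minimal sketch with a hypothetical per-host check:

import java.util.List;
import java.util.concurrent.Semaphore;

public class SemaphoreJoin {
    public static void main(String[] args) throws InterruptedException {
        List<String> hosts = List.of("node1", "node2", "node3");
        final Semaphore join = new Semaphore(hosts.size());   // one permit per task
        for (final String host : hosts) {
            join.acquire();                                   // reserve a permit
            new Thread(() -> {
                try {
                    System.out.println("validating " + host); // stands in for the real check
                } finally {
                    join.release();                           // return the permit when done
                }
            }).start();
        }
        join.acquire(hosts.size());   // blocks until every task has released
        System.out.println("all nodes validated");
    }
}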