Example usage for the java.util.concurrent.Semaphore constructor Semaphore(int)

Introduction

On this page you can find example usage of the java.util.concurrent.Semaphore constructor Semaphore(int permits), collected from open-source projects.

Prototype

public Semaphore(int permits) 

Document

Creates a Semaphore with the given number of permits and nonfair fairness setting.
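
The permits value may also be negative, in which case releases must occur before any acquires are granted. As a minimal sketch of the constructor's semantics (the class and variable names below are illustrative, not taken from the examples that follow): a semaphore created with zero permits blocks acquirers until another thread releases, which is the signalling pattern most of the usage examples on this page rely on.

import java.util.concurrent.Semaphore;

public class SemaphoreDemo {
    public static void main(String[] args) throws InterruptedException {
        // Zero permits: acquire() blocks until another thread calls release().
        final Semaphore ready = new Semaphore(0);

        Thread worker = new Thread(() -> {
            // ... perform initialization work ...
            ready.release(); // signal: one permit becomes available
        });
        worker.start();

        ready.acquire(); // blocks until the worker signals
        System.out.println("worker initialized");
    }
}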

Usage

From source file:com.netflix.curator.x.discovery.TestServiceDiscovery.java
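
A zero-permit semaphore serves as a registration signal here: the overridden internalRegisterService() releases a permit on every registration, so the test can block until the initial registration completes and again until the service re-registers after the ZooKeeper server is killed and restarted.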

@Test
public void testCrashedServer() throws Exception {
    List<Closeable> closeables = Lists.newArrayList();
    TestingServer server = new TestingServer();
    closeables.add(server);
    try {
        Timing timing = new Timing();
        CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), timing.session(),
                timing.connection(), new RetryOneTime(1));
        closeables.add(client);
        client.start();

        final Semaphore semaphore = new Semaphore(0);
        ServiceInstance<String> instance = ServiceInstance.<String>builder().payload("thing").name("test")
                .port(10064).build();
        ServiceDiscovery<String> discovery = new ServiceDiscoveryImpl<String>(client, "/test",
                new JsonInstanceSerializer<String>(String.class), instance) {
            @Override
            protected void internalRegisterService(ServiceInstance<String> service) throws Exception {
                super.internalRegisterService(service);
                semaphore.release();
            }
        };
        closeables.add(discovery);
        discovery.start();

        timing.acquireSemaphore(semaphore);
        Assert.assertEquals(discovery.queryForInstances("test").size(), 1);

        KillSession.kill(client.getZookeeperClient().getZooKeeper(), server.getConnectString());
        server.stop();

        server = new TestingServer(server.getPort(), server.getTempDirectory());
        closeables.add(server);

        timing.acquireSemaphore(semaphore);
        Assert.assertEquals(discovery.queryForInstances("test").size(), 1);
    } finally {
        for (Closeable c : closeables) {
            IOUtils.closeQuietly(c);
        }
    }
}

From source file:org.apache.zeppelin.spark.SparkRInterpreter.java
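
The interpreter launches the SparkR backend on a separate thread and uses a zero-permit semaphore as an initialization latch: the backend thread releases a permit once init() returns, while the main thread waits in tryAcquire() bounded by SPARKR_BACKEND_TIMEOUT before starting the R process.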

@Override
public void open() {
    // create R script
    createRScript();

    int backendTimeout = Integer.parseInt(System.getenv().getOrDefault("SPARKR_BACKEND_TIMEOUT", "120"));

    // Launch a SparkR backend server for the R process to connect to; this will let it see our
    // Java system properties etc.
    ZeppelinRBackend sparkRBackend = new ZeppelinRBackend();

    Semaphore initialized = new Semaphore(0);
    Thread sparkRBackendThread = new Thread("SparkR backend") {
        @Override
        public void run() {
            sparkRBackendPort = sparkRBackend.init();
            initialized.release();
            sparkRBackend.run();
        }
    };

    sparkRBackendThread.start();

    // Wait for RBackend initialization to finish
    try {
        if (initialized.tryAcquire(backendTimeout, TimeUnit.SECONDS)) {
            // Launch R
            CommandLine cmd = CommandLine.parse(getProperty("zeppelin.sparkr.r"));
            cmd.addArgument(scriptPath, false);
            cmd.addArgument("--no-save", false);
            //      cmd.addArgument(getJavaSparkContext().version(), false);
            executor = new DefaultExecutor();
            outputStream = new ByteArrayOutputStream();
            PipedOutputStream ps = new PipedOutputStream();
            in = null;
            try {
                in = new PipedInputStream(ps);
            } catch (IOException e1) {
                throw new InterpreterException(e1);
            }
            ins = new BufferedWriter(new OutputStreamWriter(ps));

            input = new ByteArrayOutputStream();

            PumpStreamHandler streamHandler = new PumpStreamHandler(outputStream, outputStream, in);
            executor.setStreamHandler(streamHandler);
            executor.setWatchdog(new ExecuteWatchdog(ExecuteWatchdog.INFINITE_TIMEOUT));

            Map<String, String> env = EnvironmentUtils.getProcEnvironment();

            String sparkRInterpreterObjId = sparkRBackend.put(this);
            String uberdataContextObjId = sparkRBackend.put(getUberdataContext());
            env.put("R_PROFILE_USER", scriptPath);
            env.put("SPARK_HOME", getSparkHome());
            env.put("EXISTING_SPARKR_BACKEND_PORT", String.valueOf(sparkRBackendPort));
            env.put("SPARKR_INTERPRETER_ID", sparkRInterpreterObjId);
            env.put("UBERDATA_CONTEXT_ID", uberdataContextObjId);
            logger.info("executing {} {}", env, cmd.toString());
            executor.execute(cmd, env, this);
            logger.info("executed");
            rScriptRunning = true;

        } else {
            System.err.println("SparkR backend did not initialize in " + backendTimeout + " seconds");
            System.exit(-1);
        }
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // restore the interrupt flag
        throw new InterpreterException(e); // was constructed but never thrown
    } catch (IOException e) {
        throw new InterpreterException(e); // was constructed but never thrown
    }

}

From source file:com.pinterest.rocksplicator.controller.WorkerPoolTest.java
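
In this test the semaphore counts idle workers rather than guarding a resource: it starts at zero permits, three tasks are assigned to a pool of two threads, and after the sleep the test expects two finished tasks and therefore two available permits.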

@Test
public void testAssignMultiTask() throws Exception {
    Semaphore idleWorkersSemaphore = new Semaphore(0);
    ThreadPoolExecutor threadPoolExecutor = new ThreadPoolExecutor(2, 2, 0, TimeUnit.SECONDS,
            new ArrayBlockingQueue<Runnable>(1));
    WorkerPool workerPool = new WorkerPool(threadPoolExecutor, idleWorkersSemaphore, new TaskQueue() {
    });
    workerPool.assignTask(getSleepIncrementTask());
    workerPool.assignTask(getSleepIncrementTask());
    workerPool.assignTask(getSleepIncrementTask());
    Thread.sleep(1500);
    // Only expect 2 to finish because the pool size is 2
    Assert.assertEquals(2, SleepIncrementTask.executionCounter.intValue());
    Assert.assertEquals(2, idleWorkersSemaphore.availablePermits());
    Thread.sleep(1000);
}

From source file:com.netflix.curator.ensemble.exhibitor.TestExhibitorEnsembleProvider.java
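
A zero-permit semaphore tracks polling here: the mock REST client releases a permit on each getRaw() call, so after swapping the connection string the test drains stale permits and acquires a fresh one to be sure the provider has seen the new ensemble before stopping the old server.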

@Test
public void testChanging() throws Exception {
    TestingServer secondServer = new TestingServer();
    try {
        String mainConnectionString = "count=1&port=" + server.getPort() + "&server0=localhost";
        String secondConnectionString = "count=1&port=" + secondServer.getPort() + "&server0=localhost";

        final Semaphore semaphore = new Semaphore(0);
        final AtomicReference<String> connectionString = new AtomicReference<String>(mainConnectionString);
        Exhibitors exhibitors = new Exhibitors(Lists.newArrayList("foo", "bar"), 1000,
                dummyConnectionStringProvider);
        ExhibitorRestClient mockRestClient = new ExhibitorRestClient() {
            @Override
            public String getRaw(String hostname, int port, String uriPath, String mimeType) throws Exception {
                semaphore.release();
                return connectionString.get();
            }
        };
        ExhibitorEnsembleProvider provider = new ExhibitorEnsembleProvider(exhibitors, mockRestClient, "/foo",
                10, new RetryOneTime(1));
        provider.pollForInitialEnsemble();

        Timing timing = new Timing().multiple(4);
        final CuratorZookeeperClient client = new CuratorZookeeperClient(provider, timing.session(),
                timing.connection(), null, new RetryOneTime(2));
        client.start();
        try {
            RetryLoop.callWithRetry(client, new Callable<Object>() {
                @Override
                public Object call() throws Exception {
                    client.getZooKeeper().create("/test", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE,
                            CreateMode.PERSISTENT);
                    return null;
                }
            });

            connectionString.set(secondConnectionString);
            semaphore.drainPermits();
            semaphore.acquire();

            server.stop(); // create situation where the current zookeeper gets a sys-disconnected

            Stat stat = RetryLoop.callWithRetry(client, new Callable<Stat>() {
                @Override
                public Stat call() throws Exception {
                    return client.getZooKeeper().exists("/test", false);
                }
            });
            Assert.assertNull(stat); // it's a different server so should be null
        } finally {
            client.close();
        }
    } finally {
        IOUtils.closeQuietly(secondServer);
    }
}

From source file:com.pinterest.rocksplicator.controller.DispatcherTest.java
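
Unlike the worker-pool test above, this dispatcher test seeds the semaphore with two permits, one per worker thread, so the dispatcher only hands out tasks while idle workers remain.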

@Test
public void testingMultiTasks() throws Exception {
    sleepTimeMillis = 3000;
    PowerMockito.when(taskQueue.dequeueTask(anyString())).thenReturn(getSleepIncrementTaskFromQueue())
            .thenReturn(getSleepIncrementTaskFromQueue()).thenReturn(getSleepIncrementTaskFromQueue())
            .thenReturn(null);
    Semaphore idleWorkersSemaphore = new Semaphore(2);
    ThreadPoolExecutor threadPoolExecutor = new ThreadPoolExecutor(2, 2, 0, TimeUnit.SECONDS,
            new ArrayBlockingQueue<Runnable>(2));
    WorkerPool workerPool = new WorkerPool(threadPoolExecutor, idleWorkersSemaphore, taskQueue);
    TaskDispatcher dispatcher = new TaskDispatcher(2, idleWorkersSemaphore, workerPool, taskQueue);
    dispatcher.start();
    synchronized (SleepIncrementTask.notifyObject) {
        SleepIncrementTask.notifyObject.wait();
        SleepIncrementTask.notifyObject.wait();
    }
    Assert.assertTrue(SleepIncrementTask.executionCounter.intValue() <= 3);
    Assert.assertTrue(SleepIncrementTask.executionCounter.intValue() >= 2);
    dispatcher.stop();
}

From source file:com.impetus.ankush2.utils.LogsManager.java
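
Here a semaphore sized to the number of nodes acts as a completion barrier: a permit is acquired before each node's download task is dispatched and released in the task's finally block, and the trailing acquire(nodes.size()) blocks until every task has finished.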

public String downloadLogsOnServer() throws AnkushException {
    try {
        String clusterResourcesLogsDir = AppStoreWrapper.getClusterResourcesPath() + "logs/";

        String clusterLogsDirName = "Logs_" + this.clusterConfig.getName() + "_" + System.currentTimeMillis();

        String clusterLogsArchiveName = clusterLogsDirName + ".zip";

        final String cmpLogsDirPathOnServer = clusterResourcesLogsDir + clusterLogsDirName + "/" + component
                + "/";

        if (!FileUtils.ensureFolder(cmpLogsDirPathOnServer)) {
            throw (new AnkushException("Could not create log directory for " + this.component + " on server."));
        }

        final Semaphore semaphore = new Semaphore(nodes.size());

        try {
            for (final String host : nodes) {
                semaphore.acquire();
                AppStoreWrapper.getExecutor().execute(new Runnable() {
                    @Override
                    public void run() {
                        NodeConfig nodeConfig = clusterConfig.getNodes().get(host);

                        SSHExec connection = SSHUtils.connectToNode(host, clusterConfig.getAuthConf());
                        if (connection == null) {
                            LOG.error("Could not fetch log files - Connection not initialized", component,
                                    host);
                            // Bail out, releasing the held permit so the final
                            // acquire(nodes.size()) cannot deadlock.
                            semaphore.release();
                            return;
                        }
                        Serviceable serviceableObj = null;
                        try {
                            serviceableObj = ObjectFactory.getServiceObject(component);

                            for (String role : roles) {
                                if (nodeConfig.getRoles().get(component).contains(role)) {

                                    String tmpLogsDirOnServer = cmpLogsDirPathOnServer + "/" + role + "/" + host
                                            + "/";
                                    if (!FileUtils.ensureFolder(tmpLogsDirOnServer)) {
                                        // TODO: log the error in the operation
                                        // table and skip this role
                                        continue;
                                    }

                                    String nodeLogsDirPath = FileUtils.getSeparatorTerminatedPathEntry(
                                            serviceableObj.getLogDirPath(clusterConfig, host, role));
                                    String logFilesRegex = serviceableObj.getLogFilesRegex(clusterConfig, host,
                                            role, null);
                                    String outputTarArchiveName = role + "_" + System.currentTimeMillis()
                                            + ".tar.gz";
                                    try {
                                        List<String> logsFilesList = AnkushUtils.listFilesInDir(connection,
                                                host, nodeLogsDirPath, logFilesRegex);

                                        AnkushTask ankushTask = new CreateTarArchive(nodeLogsDirPath,
                                                nodeLogsDirPath + outputTarArchiveName, logsFilesList);
                                        if (connection.exec(ankushTask).rc != 0) {
                                            // TODO: log the error in the operation
                                            // table and skip this role
                                            continue;
                                        }
                                        connection.downloadFile(nodeLogsDirPath + outputTarArchiveName,
                                                tmpLogsDirOnServer + outputTarArchiveName);
                                        ankushTask = new Remove(nodeLogsDirPath + outputTarArchiveName);
                                        connection.exec(ankushTask);
                                        System.out.println("tmpLogsDirOnServer + outputTarArchiveName : "
                                                + tmpLogsDirOnServer + outputTarArchiveName);
                                        ankushTask = new UnTarArchive(tmpLogsDirOnServer + outputTarArchiveName,
                                                tmpLogsDirOnServer);
                                        System.out.println(
                                                "ankushTask.getCommand() : " + ankushTask.getCommand());
                                        Runtime.getRuntime().exec(ankushTask.getCommand()).waitFor();
                                        ankushTask = new Remove(tmpLogsDirOnServer + outputTarArchiveName);
                                        Runtime.getRuntime().exec(ankushTask.getCommand()).waitFor();
                                    } catch (Exception e) {
                                        e.printStackTrace();
                                        // TODO: log the exception in the operation
                                        // table and skip this role
                                        continue;
                                    }
                                }
                            }
                        } catch (Exception e) {
                            LOG.error(e.getMessage(), component, e);
                            return;
                        } finally {
                            // semaphore is a final local and can never be null here
                            semaphore.release();
                            if (connection != null) {
                                connection.disconnect();
                            }
                        }
                    }
                });
            }
            semaphore.acquire(nodes.size());
        } catch (Exception e) {
            LOG.error(e.getMessage(), component, e);
        }

        ZipUtil.pack(new File(clusterResourcesLogsDir + clusterLogsDirName),
                new File(clusterResourcesLogsDir + clusterLogsArchiveName), true);

        org.apache.commons.io.FileUtils.deleteDirectory(new File(clusterResourcesLogsDir + clusterLogsDirName));

        // result.put(com.impetus.ankush2.constant.Constant.Keys.DOWNLOADPATH,
        // clusterResourcesLogsDir + clusterLogsArchiveName);
    } catch (Exception e) {
        // this.addAndLogError("Could not download logs for " + component +
        // ".");
        LOG.error(e.getMessage(), component, e);
    }
    return null;
}

From source file:com.amazonaws.services.s3.internal.MultiFileOutputStream.java
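
In this stream the semaphore bounds temporary disk usage during a multi-part upload: it is sized to diskLimit / partSize, so each buffered part consumes one permit.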

/**
 * Used to initialized this stream. This method is an SPI (service provider
 * interface) that is called from <code>AmazonS3EncryptionClient</code>.
 * <p>
 * Implementation of this method should never block.
 *
 * @param observer
 *            the upload object observer
 * @param partSize
 *            part size for multi-part upload
 * @param diskLimit
 *            the maximum disk space to be used for this multi-part upload
 *
 * @return this object
 */
@SuppressWarnings("checkstyle:hiddenfield")
public MultiFileOutputStream init(UploadObjectObserver observer, long partSize, long diskLimit) {
    if (observer == null) {
        throw new IllegalArgumentException("Observer must be specified");
    }
    this.observer = observer;
    if (diskLimit < partSize << 1) {
        throw new IllegalArgumentException(
                "Maximum temporary disk space must be at least twice as large as the part size: partSize="
                        + partSize + ", diskSize=" + diskLimit);
    }
    this.partSize = partSize;
    this.diskLimit = diskLimit;
    final int max = (int) (diskLimit / partSize);
    // A negative max indicates the int cast overflowed (an effectively
    // unbounded disk limit); in that case no permit throttling is applied.
    this.diskPermits = max < 0 ? null : new Semaphore(max);
    return this;
}

From source file:org.commoncrawl.service.directory.BlockingClient.java
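
This client turns an asynchronous connect into a blocking one: a zero-permit semaphore is acquired uninterruptibly after opening the channel, presumably released by a connection callback that is not shown in this snippet.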

void connect(InetAddress address) throws IOException {
    try {
        System.out.println("Connecting to server at:" + address);
        _channel = new AsyncClientChannel(_eventLoop, null,
                new InetSocketAddress(address, CrawlEnvironment.DIRECTORY_SERVICE_RPC_PORT), this);
        _blockingCallSemaphore = new Semaphore(0);
        _channel.open();
        _serviceStub = new DirectoryService.AsyncStub(_channel);
        System.out.println("Waiting on Connect... ");
        _blockingCallSemaphore.acquireUninterruptibly();
        System.out.println("Connect Semaphore Released... ");
        _blockingCallSemaphore = null;

        if (!_channel.isOpen()) {
            throw new IOException("Connection Failed!");
        }
    } catch (IOException e) {
        e.printStackTrace();
    }
}

From source file:org.apache.marmotta.ldclient.services.ldclient.LDClient.java
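
The constructor sizes the semaphore to config.getMaxParallelRequests(), throttling how many Linked Data retrievals the client may run concurrently.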

public LDClient(ClientConfiguration config) {
    log.info("Initialising Linked Data Client Service ...");

    this.config = config;

    endpoints = new ArrayList<>();
    for (Endpoint endpoint : defaultEndpoints) {
        endpoints.add(endpoint);
    }
    endpoints.addAll(config.getEndpoints());

    Collections.sort(endpoints);
    if (log.isInfoEnabled()) {
        for (Endpoint endpoint : endpoints) {
            log.info("- LDClient Endpoint: {}", endpoint.getName());
        }
    }

    providers = new ArrayList<>();
    for (DataProvider provider : defaultProviders) {
        providers.add(provider);
    }
    providers.addAll(config.getProviders());
    if (log.isInfoEnabled()) {
        for (DataProvider provider : providers) {
            log.info("- LDClient Provider: {}", provider.getName());
        }
    }

    retrievalSemaphore = new Semaphore(config.getMaxParallelRequests());

    if (config.getHttpClient() != null) {
        log.debug("Using HttpClient provided in the configuration");
        this.client = config.getHttpClient();
    } else {
        log.debug("Creating default HttpClient based on the configuration");

        HttpParams httpParams = new BasicHttpParams();
        httpParams.setParameter(CoreProtocolPNames.USER_AGENT, "Apache Marmotta LDClient");

        httpParams.setIntParameter(CoreConnectionPNames.SO_TIMEOUT, config.getSocketTimeout());
        httpParams.setIntParameter(CoreConnectionPNames.CONNECTION_TIMEOUT, config.getConnectionTimeout());

        httpParams.setBooleanParameter(ClientPNames.HANDLE_REDIRECTS, true);
        httpParams.setIntParameter(ClientPNames.MAX_REDIRECTS, 3);

        SchemeRegistry schemeRegistry = new SchemeRegistry();
        schemeRegistry.register(new Scheme("http", 80, PlainSocketFactory.getSocketFactory()));

        try {
            SSLContext sslcontext = SSLContext.getInstance("TLS");
            sslcontext.init(null, null, null);
            SSLSocketFactory sf = new SSLSocketFactory(sslcontext, SSLSocketFactory.STRICT_HOSTNAME_VERIFIER);

            schemeRegistry.register(new Scheme("https", 443, sf));
        } catch (NoSuchAlgorithmException e) {
            e.printStackTrace();
        } catch (KeyManagementException e) {
            e.printStackTrace();
        }

        PoolingClientConnectionManager cm = new PoolingClientConnectionManager(schemeRegistry);
        cm.setMaxTotal(20);
        cm.setDefaultMaxPerRoute(10);

        DefaultHttpClient client = new DefaultHttpClient(cm, httpParams);
        client.setRedirectStrategy(new LMFRedirectStrategy());
        client.setHttpRequestRetryHandler(new LMFHttpRequestRetryHandler());
        idleConnectionMonitorThread = new IdleConnectionMonitorThread(client.getConnectionManager());
        idleConnectionMonitorThread.start();

        this.client = client;
    }
}

From source file:com.netflix.curator.framework.recipes.leader.TestLeaderSelector.java
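
A zero-permit semaphore signals leadership acquisition: takeLeadership() releases a permit whenever a selector becomes leader, and the test acquires one (within a timing bound) both before and after the session kill to verify that leadership is re-acquired.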

@Test
public void testKillSession() throws Exception {
    final Timing timing = new Timing();

    CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), timing.session(),
            timing.connection(), new RetryOneTime(1));
    client.start();
    try {
        final Semaphore semaphore = new Semaphore(0);
        final CountDownLatch interruptedLatch = new CountDownLatch(1);
        final AtomicInteger leaderCount = new AtomicInteger(0);
        LeaderSelectorListener listener = new LeaderSelectorListener() {
            private volatile Thread ourThread;

            @Override
            public void takeLeadership(CuratorFramework client) throws Exception {
                leaderCount.incrementAndGet();
                try {
                    ourThread = Thread.currentThread();
                    semaphore.release();
                    try {
                        Thread.sleep(1000000);
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                        interruptedLatch.countDown();
                    }
                } finally {
                    leaderCount.decrementAndGet();
                }
            }

            @Override
            public void stateChanged(CuratorFramework client, ConnectionState newState) {
                if ((newState == ConnectionState.LOST) && (ourThread != null)) {
                    ourThread.interrupt();
                }
            }
        };
        LeaderSelector leaderSelector1 = new LeaderSelector(client, PATH_NAME, listener);
        LeaderSelector leaderSelector2 = new LeaderSelector(client, PATH_NAME, listener);

        leaderSelector1.start();
        leaderSelector2.start();

        Assert.assertTrue(timing.acquireSemaphore(semaphore, 1));

        KillSession.kill(client.getZookeeperClient().getZooKeeper(), server.getConnectString());

        Assert.assertTrue(timing.awaitLatch(interruptedLatch));
        timing.sleepABit();

        leaderSelector1.requeue();
        leaderSelector2.requeue();

        Assert.assertTrue(timing.acquireSemaphore(semaphore, 1));
        Assert.assertEquals(leaderCount.get(), 1);

        leaderSelector1.close();
        leaderSelector2.close();
    } finally {
        client.close();
    }
}