Example usage for java.util.concurrent Semaphore Semaphore

List of usage examples for java.util.concurrent Semaphore Semaphore

Introduction

This page collects example usages of the java.util.concurrent Semaphore(int permits) constructor, drawn from several open-source projects.

Prototype

public Semaphore(int permits) 

Source Link

Document

Creates a Semaphore with the given number of permits and nonfair fairness setting.

Usage

From source file:com.amazonaws.services.kinesis.clientlibrary.lib.worker.WorkerTest.java

private void runAndTestWorker(List<Shard> shardList, int threadPoolSize, List<KinesisClientLease> initialLeases,
        boolean callProcessRecordsForEmptyRecordList, int numberOfRecordsPerShard) throws Exception {
    // Back the Kinesis proxy with a temp file of generated per-shard records.
    File testDataFile = KinesisLocalFileDataCreator.generateTempDataFile(shardList, numberOfRecordsPerShard,
            "unitTestWT001");
    IKinesisProxy fileBasedProxy = new KinesisLocalFileProxy(testDataFile.getAbsolutePath());

    // TestStreamlet releases one permit per record processed; start at zero
    // so the acquire below blocks until everything has been consumed.
    Semaphore processedRecordCount = new Semaphore(0);
    ShardSequenceVerifier sequenceVerifier = new ShardSequenceVerifier(shardList);
    TestStreamletFactory recordProcessorFactory = new TestStreamletFactory(processedRecordCount,
            sequenceVerifier);

    ExecutorService executorService = Executors.newFixedThreadPool(threadPoolSize);

    WorkerThread workerThread = runWorker(shardList, initialLeases, callProcessRecordsForEmptyRecordList,
            failoverTimeMillis, numberOfRecordsPerShard, fileBasedProxy, recordProcessorFactory,
            executorService, nullMetricsFactory);

    // Block until every record on every shard has been processed.
    processedRecordCount.acquire(numberOfRecordsPerShard * shardList.size());

    // Give the worker a moment to spin against the end of the stream.
    Thread.sleep(500L);

    testWorker(shardList, threadPoolSize, initialLeases, callProcessRecordsForEmptyRecordList,
            numberOfRecordsPerShard, fileBasedProxy, recordProcessorFactory);

    workerThread.getWorker().shutdown();
    executorService.shutdownNow();
    testDataFile.delete();
}

From source file:io.pravega.client.stream.impl.ControllerImplTest.java

@Test
public void testParallelGetCurrentSegments() throws Exception {
    final ExecutorService executorService = Executors.newFixedThreadPool(10);
    // Negative initial permit count: the main thread's acquire() unblocks
    // only after 10 tasks x 2 iterations = 20 successful release() calls,
    // or after the failure path's bulk release(20).
    Semaphore pendingResults = new Semaphore(-19);
    AtomicBoolean success = new AtomicBoolean(true);
    // All ten workers run the same stateless verification loop.
    Runnable verifier = () -> {
        for (int iteration = 0; iteration < 2; iteration++) {
            try {
                CompletableFuture<StreamSegments> streamSegments = controllerClient
                        .getCurrentSegments("scope1", "streamparallel");
                assertTrue(streamSegments.get().getSegments().size() == 2);
                assertEquals(new Segment("scope1", "streamparallel", 0),
                        streamSegments.get().getSegmentForKey(0.2));
                assertEquals(new Segment("scope1", "streamparallel", 1),
                        streamSegments.get().getSegmentForKey(0.6));
                pendingResults.release();
            } catch (Exception e) {
                log.error("Exception when getting segments: {}", e);

                // Don't wait for other threads to complete.
                success.set(false);
                pendingResults.release(20);
            }
        }
    };
    for (int i = 0; i < 10; i++) {
        executorService.submit(verifier);
    }
    pendingResults.acquire();
    executorService.shutdownNow();
    assertTrue(success.get());
}

From source file:android.webkit.cts.WebViewTest.java

private void doSaveWebArchive(String baseName, boolean autoName, final String expectName) throws Throwable {
    // Released exactly once, by the save callback, after it has checked the name.
    final Semaphore saved = new Semaphore(0);
    ValueCallback<String> onSaved = new ValueCallback<String>() {
        @Override
        public void onReceiveValue(String savedName) {
            assertEquals(expectName, savedName);
            saved.release();
        }
    };

    mOnUiThread.saveWebArchive(baseName, autoName, onSaved);
    // Fail the test if the callback does not arrive within the timeout.
    assertTrue(saved.tryAcquire(TEST_TIMEOUT, TimeUnit.MILLISECONDS));
}

From source file:com.impetus.ankush2.cassandra.deployer.CassandraDeployer.java

/**
 * Function to update seed node information after add or remove nodes on all
 * nodes in a cluster//from  ww  w . j av a  2 s .c  om
 */
/**
 * Updates the seed node list in cassandra.yaml on every node of the
 * cluster after nodes have been added or removed.
 *
 * <p>One task per host is submitted to the shared executor; a counting
 * semaphore (one permit per host) is used to wait until every task has
 * finished before returning.
 */
private void updateSeedNodeValue() {
    // sed command that rewrites the "- seeds:" line of cassandra.yaml in place.
    String cassandraYaml = advanceConf.get(CassandraConstants.ClusterProperties.CONF_DIR)
            + CassandraConstants.Cassandra_Configuration_Files.CASSANDRA_YAML;
    final String command = "sed -i -e 's/- seeds: \".*$/- seeds: \"" + getSeedNodeString() + "\"/' "
            + cassandraYaml;

    Map<String, Map<String, Object>> nodes = new HashMap<String, Map<String, Object>>(
            componentConfig.getNodes());
    if (clusterConfig.getState().equals(Constant.Cluster.State.REMOVE_NODE)) {
        // NOTE(review): on node removal the target set comes from returnNodes();
        // confirm against that helper which nodes it yields.
        nodes = new HashMap<String, Map<String, Object>>(returnNodes());
    }

    final Semaphore semaphore = new Semaphore(nodes.size());
    try {
        for (final String host : nodes.keySet()) {
            semaphore.acquire();
            AppStoreWrapper.getExecutor().execute(new Runnable() {
                @Override
                public void run() {
                    try {
                        SSHExec connection = getConnection(host);
                        if (connection != null) {
                            execCustomtask(command, connection, host, "Could not update seeds value");
                        }
                    } catch (AnkushException e) {
                        addClusterError(e.getMessage(), host, e);
                    } catch (Exception e) {
                        addClusterError("Could not update seed node value in cassandra.yaml file.", host, e);
                    } finally {
                        // Always release (the original skipped this on an
                        // exception thrown by error reporting, which would
                        // deadlock the acquire below). The former
                        // "semaphore != null" guard was dead code: the final
                        // local can never be null here.
                        semaphore.release();
                    }
                }
            });
        }
        // All permits re-acquired => every per-host task has completed.
        semaphore.acquire(nodes.size());

    } catch (Exception e) {
        logger.error("Error in editing seeds values.", e);
    }
}

From source file:org.commoncrawl.service.listcrawler.CacheManager.java

/********************************************************************************************************/

/**
 * Command-line smoke-test driver for the cache manager.
 *
 * <p>{@code main populate}: starts the cache writer and HDFS flusher
 * threads, writes 1,000,000 synthetic items (each sharing one 32&nbsp;KB
 * random buffer), then queries them all back; the completion semaphore is
 * released when the final item (#999999) is found.
 *
 * <p>{@code main read}: caches a single item and schedules timer-driven
 * cache lookups on the event loop. The semaphore is never released on this
 * path, so the trailing acquire blocks indefinitely — presumably to keep
 * the process alive for the event-loop callbacks (TODO confirm intent).
 */
public static void main(String[] args) {

    final EventLoop eventLoop = new EventLoop();
    eventLoop.start();

    final CacheManager manager = new CacheManager(eventLoop);
    // delete active log if it exists ... 
    manager.getActiveLogFilePath().delete();
    try {
        manager.initialize(INIT_FLAG_SKIP_CACHE_WRITER_INIT | INIT_FLAG_SKIP_HDFS_WRITER_INIT);
    } catch (IOException e1) {
        LOG.error(CCStringUtils.stringifyException(e1));
        return;
    }

    MessageDigest digester;
    try {
        digester = MessageDigest.getInstance("MD5");
    } catch (NoSuchAlgorithmException e1) {
        LOG.error(CCStringUtils.stringifyException(e1));
        return;
    }

    // Fill a 32KB (1 << 15 bytes) buffer with pseudo-random content by
    // chaining MD5 digests of unique (UID, nanoTime) strings, 16 bytes at a time.
    final byte[] randomBytes = new byte[1 << 15];
    LOG.info("Building Random Digest");
    for (int i = 0; i < randomBytes.length; i += 16) {
        long time = System.nanoTime();
        digester.update((new UID() + "@" + time).getBytes());
        System.arraycopy(digester.digest(), 0, randomBytes, i, 16);
    }

    // Released (populate mode only) once the last item has been read back.
    final Semaphore semaphore = new Semaphore(0);

    if (args[0].equals("populate")) {

        manager.startCacheWriterThread();
        manager.startHDFSFlusherThread();

        try {

            LOG.info("Done Building Random Digest");

            LOG.info("Writing Items To Disk");
            for (int i = 0; i < 1000000; ++i) {

                if (i % 1000 == 0) {
                    LOG.info("Wrote:" + i + " entries");
                }

                final CacheItem item1 = new CacheItem();
                item1.setUrl(manager.normalizeURL("http://www.domain.com/foobar/" + i));
                item1.setContent(new Buffer(randomBytes));
                item1.setUrlFingerprint(URLFingerprint.generate64BitURLFPrint(item1.getUrl()));
                manager.cacheItem(item1, null);
                Thread.sleep(1);

                // Periodically pause to let the writer/flusher threads drain.
                if (i != 0 && i % 10000 == 0) {
                    LOG.info("Hit 10000 items.. sleeping for 20 seconds");
                    Thread.sleep(20 * 1000);
                }
            }

            Thread.sleep(30000);

            // Read every item back; release the semaphore when the last one arrives.
            for (int i = 0; i < 1000000; ++i) {

                final String url = new String("http://www.domain.com/foobar/" + i);
                manager.checkCacheForItem(url, new CacheItemCheckCallback() {

                    @Override
                    public void cacheItemAvailable(String url, CacheItem item) {
                        Assert.assertTrue(item.getUrl().equals(url));
                        String itemIndex = url.substring("http://www.domain.com/foobar/".length());
                        int itemNumber = Integer.parseInt(itemIndex);
                        if (itemNumber == 999999) {
                            semaphore.release();
                        }
                    }

                    @Override
                    public void cacheItemNotFound(String url) {
                        // Every populated item must be found.
                        Assert.assertTrue(false);
                    }
                });
            }
        } catch (IOException e) {
            LOG.error(CCStringUtils.stringifyException(e));
        } catch (InterruptedException e2) {
            // interruption simply ends the populate run early
        }
    } else if (args[0].equals("read")) {

        try {
            final CacheItem item1 = new CacheItem();
            item1.setUrl(manager.normalizeURL("http://www.domain.com/barz/"));
            item1.setUrlFingerprint(URLFingerprint.generate64BitURLFPrint(item1.getUrl()));
            item1.setContent(new Buffer(randomBytes));
            manager.cacheItem(item1, null);

            // queue up cache load requests .... 
            for (int i = 0; i < 10000; ++i) {

                final String url = new String("http://www.domain.com/foobar/" + i);

                eventLoop.setTimer(new Timer(1, false, new Timer.Callback() {

                    @Override
                    public void timerFired(Timer timer) {
                        manager.checkCacheForItem(url, new CacheItemCheckCallback() {

                            @Override
                            public void cacheItemAvailable(String url, CacheItem item) {
                                LOG.info("FOUND Item for URL:" + url + " ContentSize:"
                                        + item.getContent().getCount());
                            }

                            @Override
                            public void cacheItemNotFound(String url) {
                                LOG.info("DIDNOT Find Item for URL:" + url);
                            }

                        });
                    }
                }));
            }

            // Also look up the single item cached above.
            eventLoop.setTimer(new Timer(1, false, new Timer.Callback() {

                @Override
                public void timerFired(Timer timer) {
                    manager.checkCacheForItem(item1.getUrl(), new CacheItemCheckCallback() {

                        @Override
                        public void cacheItemAvailable(String url, CacheItem item) {
                            LOG.info("FOUND Item for URL:" + url + " ContentSize:"
                                    + item.getContent().getCount());
                        }

                        @Override
                        public void cacheItemNotFound(String url) {
                            LOG.info("DIDNOT Find Item for URL:" + url);
                        }

                    });
                }

            }));
        } catch (IOException e) {
            LOG.error(CCStringUtils.stringifyException(e));
        }
    }
    // Blocks until populate mode signals completion; never returns in read mode.
    semaphore.acquireUninterruptibly();

}

From source file:com.parse.ParseUserTest.java

@Test
public void testLogInWithCallback() throws Exception {
    // Stub the current-user controller so setCurrentUser succeeds.
    ParseCurrentUserController currentUserController = mock(ParseCurrentUserController.class);
    when(currentUserController.setAsync(any(ParseUser.class))).thenReturn(Task.<Void>forResult(null));
    ParseCorePlugins.getInstance().registerCurrentUserController(currentUserController);
    // Stub the user controller so logIn returns a canned user state.
    ParseUser.State newUserState = new ParseUser.State.Builder().put("newKey", "newValue")
            .sessionToken("newSessionToken").build();
    ParseUserController userController = mock(ParseUserController.class);
    when(userController.logInAsync(anyString(), anyString())).thenReturn(Task.forResult(newUserState));
    ParseCorePlugins.getInstance().registerUserController(userController);

    final Semaphore done = new Semaphore(0);
    ParseUser.logInInBackground("userName", "password", new LogInCallback() {
        @Override
        public void done(ParseUser user, ParseException e) {
            done.release();
            assertNull(e);
            // The logged-in user must carry the stubbed state.
            assertEquals("newSessionToken", user.getSessionToken());
            assertEquals("newValue", user.get("newKey"));
        }
    });

    // Wait for the async callback before verifying interactions.
    assertTrue(done.tryAcquire(5, TimeUnit.SECONDS));
    // Make sure user is login
    verify(userController, times(1)).logInAsync("userName", "password");
    // Make sure we set currentUser
    verify(currentUserController, times(1)).setAsync(any(ParseUser.class));
}

From source file:com.impetus.ankush2.cassandra.deployer.CassandraDeployer.java

/**
 * Function to update topology information on all nodes in cluster after add
 * or remove nodes/*w  w  w . ja va2s  .co  m*/
 */
/**
 * Pushes updated topology information to every node in the cluster after
 * nodes have been added or removed.
 *
 * <p>One task per host is submitted to the shared executor; a counting
 * semaphore (one permit per host) is used to wait until every task has
 * finished before returning.
 */
private void updateTopologyFile() {
    Map<String, Map<String, Object>> nodes = new HashMap<String, Map<String, Object>>(
            componentConfig.getNodes());
    if (clusterConfig.getState().equals(Constant.Cluster.State.REMOVE_NODE)) {
        // NOTE(review): on node removal the target set comes from returnNodes();
        // confirm against that helper which nodes it yields.
        nodes = new HashMap<String, Map<String, Object>>(returnNodes());
    }

    final Semaphore semaphore = new Semaphore(nodes.size());
    try {
        for (final String host : nodes.keySet()) {
            semaphore.acquire();
            AppStoreWrapper.getExecutor().execute(new Runnable() {
                @Override
                public void run() {
                    try {
                        SSHExec connection = getConnection(host);
                        if (connection != null) {
                            editTopologyFile(connection, host);
                        }
                    } catch (AnkushException e) {
                        addClusterError(e.getMessage(), host, e);
                    } catch (Exception e) {
                        addClusterError("Could not update topology details.", host, e);
                    } finally {
                        // Always release (the original skipped this on an
                        // exception thrown by error reporting, which would
                        // deadlock the acquire below). The former
                        // "semaphore != null" guard was dead code: the final
                        // local can never be null here.
                        semaphore.release();
                    }
                }
            });
        }
        // All permits re-acquired => every per-host task has completed.
        semaphore.acquire(nodes.size());

    } catch (Exception e) {
        logger.error("Error in updating topology file.", e);
    }
}

From source file:com.parse.ParseUserTest.java

@Test
public void testBecomeWithCallback() throws Exception {
    // Register a mock currentUserController to make setCurrentUser work
    ParseCurrentUserController currentUserController = mock(ParseCurrentUserController.class);
    when(currentUserController.setAsync(any(ParseUser.class))).thenReturn(Task.<Void>forResult(null));
    ParseCorePlugins.getInstance().registerCurrentUserController(currentUserController);
    // Register a mock userController to make getUserAsync work
    ParseUserController userController = mock(ParseUserController.class);
    ParseUser.State newUserState = new ParseUser.State.Builder().put("key", "value")
            .sessionToken("sessionToken").build();
    when(userController.getUserAsync(anyString())).thenReturn(Task.forResult(newUserState));
    ParseCorePlugins.getInstance().registerUserController(userController);

    final Semaphore done = new Semaphore(0);
    ParseUser.becomeInBackground("sessionToken", new LogInCallback() {
        @Override
        public void done(ParseUser user, ParseException e) {
            done.release();
            assertNull(e);
            // Make sure user's data is correct
            assertEquals("sessionToken", user.getSessionToken());
            assertEquals("value", user.get("key"));
        }
    });

    // FIX: wait for the async callback before verifying. The original never
    // acquired the semaphore it released, so the verifications below could
    // race the callback and its assertions could run after the test method
    // returned, masking failures. Mirrors testLogInWithCallback.
    assertTrue(done.tryAcquire(5, TimeUnit.SECONDS));
    // Make sure we call getUserAsync
    verify(userController, times(1)).getUserAsync("sessionToken");
    // Make sure we set currentUser
    verify(currentUserController, times(1)).setAsync(any(ParseUser.class));
}

From source file:com.impetus.ankush2.cassandra.deployer.CassandraDeployer.java

/**
 * Registers Cassandra on every node of the cluster.
 *
 * <p>Skips registration entirely (returning {@code true}) when the
 * component is not monitored by Ankush. Otherwise fans out one
 * registration task per host on the shared executor and waits, via a
 * counting semaphore, until every task has recorded its node status.
 *
 * @param conf cluster configuration whose node statuses are updated
 * @return {@code true} if every node registered successfully; {@code false}
 *         on any node failure or on error (reported via addClusterError)
 */
@Override
public boolean register(final ClusterConfig conf) {

    try {
        ComponentConfig compConfig = clusterConfig.getComponents().get(this.componentName);
        if (!AnkushUtils.isMonitoredByAnkush(compConfig)) {
            logger.info("Skipping " + getComponentName() + " registration for Level1", this.componentName);
            return true;
        }
        final String infoMsg = "Registering Cassandra";
        logger.info(infoMsg);

        // Getting node map for cluster deployment
        Map<String, Map<String, Object>> nodeMap = new HashMap<String, Map<String, Object>>(
                componentConfig.getNodes());

        // Node Registration process ...
        final Semaphore semaphore = new Semaphore(nodeMap.size());
        for (final String host : nodeMap.keySet()) {
            semaphore.acquire();
            AppStoreWrapper.getExecutor().execute(new Runnable() {
                @Override
                public void run() {
                    try {
                        conf.getNodes().get(host).setStatus(registerNode(host));
                    } finally {
                        // FIX: release in finally. The original skipped the
                        // release when registerNode() threw, deadlocking the
                        // acquire below. The former "semaphore != null" guard
                        // was dead code: the final local can never be null.
                        semaphore.release();
                    }
                }
            });
        }
        // All permits re-acquired => every per-host task has completed.
        semaphore.acquire(nodeMap.size());
        // Return false if any of the node is not deployed.
        return AnkushUtils.getStatus(conf.getNodes());
    } catch (Exception e) {
        return addClusterError("Could not register " + getComponentName(), e);
    }
}

From source file:org.commoncrawl.service.listcrawler.CrawlHistoryManager.java

/**
 * Exercises the crawl-history log's checkpoint/reload cycle: append test
 * items, restart, checkpoint to 'hdfs', validate, update the items and
 * repeat, then drive a freshly-loaded crawl list through the queue loader
 * and wait on a semaphore until every list item has been reported updated.
 */
private static void testWriteMapFileToHDFS(EventLoop eventLoop) {
    try {
        // initialize log manager
        CrawlHistoryManager logManager = initializeTestLogManager(eventLoop, true);

        // initialize item list
        TreeMap<URLFP, ProxyCrawlHistoryItem> items = buildTestList(urlList1);
        final TreeMap<String, URLFP> urlToURLFPMap = new TreeMap<String, URLFP>();

        // index test items by their original URL for lookup in the queue loader below
        for (Map.Entry<URLFP, ProxyCrawlHistoryItem> item : items.entrySet()) {
            urlToURLFPMap.put(item.getValue().getOriginalURL(), item.getKey());
        }

        // add to local item map in log manager
        for (ProxyCrawlHistoryItem item : items.values()) {
            logManager.appendItemToLog(item);
        }
        // ok shutdown log manager ...
        logManager.shutdown();

        // restart - reload log file ...
        logManager = initializeTestLogManager(eventLoop, false);

        // write to 'hdfs'
        logManager.doCheckpoint();

        syncAndValidateItems(items, logManager);

        logManager.shutdown();

        // restart
        logManager = initializeTestLogManager(eventLoop, false);

        // tweak original items
        updateTestItemStates(items);

        // ok append items
        for (ProxyCrawlHistoryItem item : items.values()) {
            logManager.appendItemToLog(item);
        }

        syncAndValidateItems(items, logManager);

        // ok now checkpoint the items
        logManager.doCheckpoint();

        // ok now validate one last time
        syncAndValidateItems(items, logManager);

        // shutdown
        logManager.shutdown();

        logManager = null;

        {
            // start from scratch ...
            final CrawlHistoryManager logManagerTest = initializeTestLogManager(eventLoop, true);

            // create a final version of the tree map reference
            final TreeMap<URLFP, ProxyCrawlHistoryItem> itemList = items;
            // create filename
            File urlInputFile = new File(logManagerTest.getLocalDataDir(),
                    "testURLS-" + System.currentTimeMillis());
            // ok create a crawl list from urls
            CrawlList.generateTestURLFile(urlInputFile, urlList1);
            long listId = logManagerTest.loadList(urlInputFile, 0);

            CrawlList listObject = logManagerTest.getList(listId);

            // Negative initial permits: acquireUninterruptibly() below
            // unblocks only after itemList.size() release() calls — one per
            // itemUpdated notification.
            final Semaphore listCompletionSemaphore = new Semaphore(-(itemList.size() - 1));

            listObject.setEventListener(new CrawlList.CrawlListEvents() {

                @Override
                public void itemUpdated(URLFP itemFingerprint) {
                    // count one completed item
                    listCompletionSemaphore.release();
                }
            });

            // ok start the appropriate threads
            logManagerTest.startLogWriterThread(0);
            logManagerTest.startListLoaderThread();
            logManagerTest.startQueueLoaderThread(new CrawlQueueLoader() {

                @Override
                public void queueURL(URLFP urlfp, String url) {
                    // complete the crawl immediately using the matching test item
                    logManagerTest.crawlComplete(
                            proxyCrawlHitoryItemToCrawlURL(itemList.get(urlToURLFPMap.get(url))));
                }

                @Override
                public void flush() {
                    // nothing buffered in this loader — nothing to flush

                }
            });

            LOG.info("Waiting for Release");

            // and wait for the finish
            listCompletionSemaphore.acquireUninterruptibly();

            LOG.info("Got Here");

        }

    } catch (IOException e) {
        LOG.error(CCStringUtils.stringifyException(e));
    }
}