Example usage for java.util.concurrent Semaphore acquireUninterruptibly

Introduction

This page collects example usages of java.util.concurrent.Semaphore.acquireUninterruptibly(), drawn from open-source projects.

Prototype

public void acquireUninterruptibly() 

Document

Acquires a permit from this semaphore, blocking until one is available. Unlike acquire(), the wait cannot be interrupted: if the thread is interrupted while waiting, it continues to wait, and its interrupt status is set when the method eventually returns.
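
Before the project examples, a minimal, self-contained sketch may help make the blocking behavior concrete; the class and variable names here are invented for illustration:

import java.util.concurrent.Semaphore;

public class AcquireUninterruptiblyExample {
    public static void main(String[] args) {
        // Zero initial permits, so the first acquire blocks until release() is called.
        final Semaphore done = new Semaphore(0);

        Thread worker = new Thread(() -> {
            // ... perform some work ...
            done.release(); // hand a permit to the waiting main thread
        });
        worker.start();

        // Blocks until a permit is available. Unlike acquire(), interrupting
        // the waiting thread does not abort the wait; the interrupt status is
        // set when the method eventually returns.
        done.acquireUninterruptibly();
        System.out.println("worker finished");
    }
}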

Usage

From source file:org.commoncrawl.util.MapReduceJobStatsWriter.java

public static void main(String[] args) {
    LOG.info("Initializing Hadoop Config");

    Configuration conf = new Configuration();

    conf.addResource("nutch-default.xml");
    conf.addResource("nutch-site.xml");
    conf.addResource("hadoop-default.xml");
    conf.addResource("hadoop-site.xml");
    conf.addResource("commoncrawl-default.xml");
    conf.addResource("commoncrawl-site.xml");

    CrawlEnvironment.setHadoopConfig(conf);
    CrawlEnvironment.setDefaultHadoopFSURI("hdfs://ccn01:9000/");

    // test the stats Writer ... 
    try {

        LOG.info("Opening Stats Writer");
        MapReduceJobStatsWriter<IntWritable, Text> statsWriter = new MapReduceJobStatsWriter<IntWritable, Text>(
                CrawlEnvironment.getDefaultFileSystem(), conf, IntWritable.class, Text.class, "test", "group1",
                12345L);

        LOG.info("Writing Entries");
        for (int i = 0; i < 1000; ++i) {
            statsWriter.appendLogEntry(new IntWritable(i), new Text("Log Entry #" + i));
        }
        LOG.info("Flushing / Closing");
        final Semaphore blockingSemaphore = new Semaphore(0); // zero permits: acquire blocks until close() signals completion
        statsWriter.close(new Callback() {

            @Override
            public void execute() {
                LOG.info("Completion Callback Triggered");
                blockingSemaphore.release();
            }

        });
        LOG.info("Waiting on Semaphore");
        blockingSemaphore.acquireUninterruptibly();
        LOG.info("Acquired Semaphore");

        LOG.info("Closed");

        Path hdfsPath = new Path(Environment.HDFS_LOGCOLLECTOR_BASEDIR,
                "test" + "/" + "group1" + "/" + Long.toString(12345L));

        LOG.info("Opening Reader");
        SequenceFile.Reader reader = new SequenceFile.Reader(CrawlEnvironment.getDefaultFileSystem(), hdfsPath,
                conf);
        IntWritable key = new IntWritable();
        Text value = new Text();
        while (reader.next(key, value)) {
            LOG.info("Key:" + key.get() + " Value:" + value.toString());
        }
        reader.close();

    } catch (IOException e) {
        LOG.error(CCStringUtils.stringifyException(e));
    }

}
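
The Semaphore here is not guarding a shared resource; created with zero permits, it acts as a one-shot completion latch that parks the caller until the asynchronous close callback fires. A minimal sketch of the same idiom, using invented names, with CompletableFuture standing in for the stats writer's callback-style close:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Semaphore;

public class CompletionLatch {
    public static void main(String[] args) {
        // Zero permits: the semaphore acts as a one-shot completion latch.
        final Semaphore completion = new Semaphore(0);

        // Stand-in for an asynchronous close that invokes a callback when done.
        CompletableFuture.runAsync(() -> {
            // ... flush and close the resource ...
        }).whenComplete((result, error) -> completion.release());

        // Block the caller until the completion callback has fired.
        completion.acquireUninterruptibly();
        System.out.println("resource closed");
    }
}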

From source file:org.commoncrawl.util.shared.S3Downloader.java

public void shutdown() {

    if (_callback == null) {
        throw new RuntimeException("Invalid State - stop called on already inactive downloader");
    }

    _freezeDownloads = true;

    Thread eventThread = (_ownsEventLoop) ? _eventLoop.getEventThread() : null;

    final Semaphore shutdownSemaphore = new Semaphore(0);

    _eventLoop.setTimer(new Timer(1, false, new Timer.Callback() {

        // shutdown within the context of the async thread ... 
        public void timerFired(Timer timer) {

            try {

                // fail any active connections 
                for (NIOHttpConnection connection : Lists.newArrayList(_activeConnections)) {
                    S3DownloadItem item = (S3DownloadItem) connection.getContext();
                    if (item != null) {
                        failDownload(item, NIOHttpConnection.ErrorType.UNKNOWN, connection, false);
                    }
                }

                _activeConnections.clear();

                // next, fail all queued items 
                for (S3DownloadItem item : _queuedItems) {
                    failDownload(item, NIOHttpConnection.ErrorType.UNKNOWN, null, false);
                }
                _queuedItems.clear();
                _freezeDownloads = false;
                _callback = null;

                if (_ownsEventLoop) {
                    //System.out.println("Stopping Event Loop");
                    _eventLoop.stop();
                }
                _eventLoop = null;
                _ownsEventLoop = false;
            } finally {
                //System.out.println("Releasing Semaphore");
                shutdownSemaphore.release();
            }
        }
    }));
    //System.out.println("Acquiring Shutdown Semaphore");
    shutdownSemaphore.acquireUninterruptibly();
    //System.out.println("Acquired Shutdown Semaphore");

    try {

        if (eventThread != null) {
            eventThread.join();
        }
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // restore the interrupt status instead of swallowing it
    }
}
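
The division of labor here is worth noting: the teardown work runs on the event loop's own thread (inside the timer callback), while the calling thread parks on acquireUninterruptibly() until that work completes. Releasing the permit in a finally block guarantees the caller is unblocked even if teardown throws.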

From source file:org.onosproject.loadtest.DistributedConsensusLoadTest.java

private void startTest() {
    stopped.set(false);
    RateLimiter limiter = RateLimiter.create(rate);
    Semaphore s = new Semaphore(100); // allow at most 100 increments in flight at once
    while (!stopped.get()) {
        limiter.acquire();
        s.acquireUninterruptibly();
        counters.get(RandomUtils.nextInt(TOTAL_COUNTERS)).incrementAndGet().whenComplete((r, e) -> {
            s.release();
            if (e == null) {
                increments.incrementAndGet();
            }
        });
    }
}
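
Unlike the previous two examples, the semaphore here is a concurrency bound rather than a latch: its 100 permits cap how many incrementAndGet() operations may be in flight at once, and each completion handler returns its permit. A minimal standalone sketch of the idiom, with invented names:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Semaphore;

public class BoundedInFlight {
    public static void main(String[] args) {
        // At most 100 asynchronous operations outstanding at any moment.
        final Semaphore inFlight = new Semaphore(100);

        for (int i = 0; i < 1000; i++) {
            inFlight.acquireUninterruptibly(); // blocks once 100 operations are pending
            CompletableFuture.runAsync(() -> {
                // ... issue the asynchronous operation ...
            }).whenComplete((result, error) -> inFlight.release()); // return the permit
        }
    }
}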

From source file:org.thiesen.jiffs.jobs.clusterer.Clusterer.java

private Map<String, Long> findClusters() {
    final Iterable<StoryDBO> unprocessed = _storyDAO.findForClustering();
    final StopWatch watch = new StopWatch();

    watch.start();

    final Map<String, Long> foundClusters = Maps.newConcurrentMap();
    final Semaphore maxEnqueuedTasks = new Semaphore(100000); // backpressure: cap the number of outstanding comparison tasks
    final List<ClusterItem> clusterItems = Lists.newLinkedList(transform(unprocessed));
    final Iterator<ClusterItem> firstIterator = clusterItems.iterator();
    while (firstIterator.hasNext()) {
        final ClusterItem firstItem = firstIterator.next();
        for (final ClusterItem secondItem : clusterItems) {
            if (firstItem == secondItem) {
                continue;
            }
            EXECUTOR.submit(new ClusterFinder(maxEnqueuedTasks, foundClusters, firstItem, secondItem));
            maxEnqueuedTasks.acquireUninterruptibly(); // blocks at the cap; ClusterFinder presumably releases the permit on completion
        }
        firstIterator.remove();
    }

    EXECUTOR.shutdown();

    try {
        EXECUTOR.awaitTermination(1, TimeUnit.DAYS);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // restore the interrupt status (Thread.interrupted() would clear and discard it)
    }
    watch.stop();

    System.out.println("Clustering took " + watch);

    return foundClusters;
}
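
The same producer-acquires / task-releases idea keeps an executor's work queue from growing without bound. A minimal sketch with invented names; unlike the code above, it acquires the permit before submitting and releases it in a finally block so that a failing task cannot leak its permit:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;

public class BoundedSubmit {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        // Cap on tasks that have been submitted but not yet finished.
        final Semaphore pending = new Semaphore(1000);

        for (int i = 0; i < 100000; i++) {
            pending.acquireUninterruptibly(); // producer blocks at the cap
            pool.submit(() -> {
                try {
                    // ... do the work ...
                } finally {
                    pending.release(); // always return the permit, even on failure
                }
            });
        }
        pool.shutdown();
        pool.awaitTermination(1, TimeUnit.DAYS);
    }
}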