List of usage examples for java.util.concurrent.LinkedBlockingQueue.take()
public E take() throws InterruptedException
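take() retrieves and removes the head of the queue, blocking until an element becomes available, and throws InterruptedException if the waiting thread is interrupted. Before the project examples below, a minimal self-contained sketch of this blocking behavior; the class name, queue contents, and sleep timing here are illustrative only and are not taken from the examples that follow.

import java.util.concurrent.LinkedBlockingQueue;

public class TakeExample {
    public static void main(String[] args) throws InterruptedException {
        // hypothetical unbounded queue shared between a producer and the main thread
        LinkedBlockingQueue<String> queue = new LinkedBlockingQueue<>();

        Thread producer = new Thread(() -> {
            try {
                Thread.sleep(100);      // simulate some work before producing
                queue.put("hello");     // hand an element to the waiting consumer
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        producer.start();

        // take() blocks here until the producer has put an element,
        // and would throw InterruptedException if this thread were interrupted.
        String element = queue.take();
        System.out.println("Took: " + element);

        producer.join();
    }
}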
From source file:org.apache.bookkeeper.metadata.etcd.EtcdRegistrationTest.java
private void testWatchBookies(boolean readonly) throws Exception {
    LinkedBlockingQueue<Versioned<Set<BookieSocketAddress>>> writableChanges = new LinkedBlockingQueue<>();
    LinkedBlockingQueue<Versioned<Set<BookieSocketAddress>>> readonlyChanges = new LinkedBlockingQueue<>();
    result(regClient.watchReadOnlyBookies(newRegistrationListener(readonlyChanges)));
    result(regClient.watchWritableBookies(newRegistrationListener(writableChanges)));
    Versioned<Set<BookieSocketAddress>> versionedBookies = writableChanges.take();
    assertTrue(versionedBookies.getValue().isEmpty());
    versionedBookies = readonlyChanges.take();
    assertTrue(versionedBookies.getValue().isEmpty());
    final int numBookies = 3;
    final List<EtcdRegistrationManager> bookies = createNumBookies(readonly, numBookies, scope, 1);
    LinkedBlockingQueue<Versioned<Set<BookieSocketAddress>>> changes;
    if (readonly) {
        changes = readonlyChanges;
    } else {
        changes = writableChanges;
    }
    Version preVersion = new LongVersion(-1);
    Set<BookieSocketAddress> expectedBookies = new HashSet<>();
    for (int i = 0; i < numBookies; i++) {
        BookieSocketAddress address = new BookieSocketAddress(newBookie(i));
        expectedBookies.add(address);
        versionedBookies = changes.take();
        Version curVersion = versionedBookies.getVersion();
        assertEquals(Occurred.AFTER, curVersion.compare(preVersion));
        assertEquals(expectedBookies, versionedBookies.getValue());
        preVersion = curVersion;
    }
    bookies.forEach(EtcdRegistrationManager::close);
    for (int i = 0; i < numBookies; i++) {
        versionedBookies = changes.take();
        Version curVersion = versionedBookies.getVersion();
        assertEquals(Occurred.AFTER, curVersion.compare(preVersion));
        assertEquals(numBookies - i - 1, versionedBookies.getValue().size());
        preVersion = curVersion;
    }
    if (readonly) {
        assertEquals(0, writableChanges.size());
    } else {
        assertEquals(0, readonlyChanges.size());
    }
}
From source file:org.apache.bookkeeper.metadata.etcd.helpers.KeySetReaderTest.java
@Test
public void testWatchSingleKey() throws Exception {
    String key = RandomStringUtils.randomAlphabetic(16);
    ByteSequence keyBs = ByteSequence.fromString(key);
    KeySetReader<String> ksReader = null;
    try {
        ksReader = new KeySetReader<>(etcdClient, BYTE_SEQUENCE_STRING_FUNCTION, keyBs, null);
        LinkedBlockingQueue<Versioned<Set<String>>> notifications = new LinkedBlockingQueue<>();
        Consumer<Versioned<Set<String>>> keyConsumer = consumeVersionedKeySet(notifications);
        // key not exists
        Versioned<Set<String>> versionedKeys = FutureUtils.result(ksReader.readAndWatch(keyConsumer));
        assertTrue("VersionedKeys : " + versionedKeys,
                ((LongVersion) versionedKeys.getVersion()).getLongVersion() > 0L);
        assertEquals(0, versionedKeys.getValue().size());
        assertTrue(ksReader.isWatcherSet());
        // keys should be cached
        assertEquals(versionedKeys, ksReader.getLocalValue());
        Versioned<Set<String>> newVersionedKey = notifications.take();
        assertEquals(Occurred.CONCURRENTLY, newVersionedKey.getVersion().compare(versionedKeys.getVersion()));
        assertEquals(versionedKeys, newVersionedKey);
        versionedKeys = newVersionedKey;
        // update a value
        String value = RandomStringUtils.randomAlphabetic(32);
        ByteSequence valueBs = ByteSequence.fromString(value);
        FutureUtils.result(etcdClient.getKVClient().put(keyBs, valueBs));
        // we should get notified with updated key set
        newVersionedKey = notifications.take();
        assertEquals(Occurred.AFTER, newVersionedKey.getVersion().compare(versionedKeys.getVersion()));
        assertEquals(1, newVersionedKey.getValue().size());
        assertEquals(Sets.newHashSet(key), newVersionedKey.getValue());
        // local value should be changed
        assertEquals(newVersionedKey, ksReader.getLocalValue());
        versionedKeys = newVersionedKey;
        // delete the key
        FutureUtils.result(etcdClient.getKVClient().delete(keyBs));
        newVersionedKey = notifications.take();
        assertEquals(Occurred.AFTER, newVersionedKey.getVersion().compare(versionedKeys.getVersion()));
        assertEquals(0, newVersionedKey.getValue().size());
        // local value should be changed
        assertEquals(newVersionedKey, ksReader.getLocalValue());
    } finally {
        if (null != ksReader) {
            ksReader.close();
        }
    }
    assertNotNull(ksReader);
    assertFalse(ksReader.isWatcherSet());
}
From source file:org.apache.bookkeeper.metadata.etcd.helpers.KeySetReaderTest.java
@Test
public void testWatchSingleKeyWithTTL() throws Exception {
    String key = RandomStringUtils.randomAlphabetic(16);
    ByteSequence keyBs = ByteSequence.fromString(key);
    KeySetReader<String> ksReader = null;
    try {
        ksReader = new KeySetReader<>(etcdClient, BYTE_SEQUENCE_STRING_FUNCTION, keyBs, null);
        LinkedBlockingQueue<Versioned<Set<String>>> notifications = new LinkedBlockingQueue<>();
        Consumer<Versioned<Set<String>>> keyConsumer = consumeVersionedKeySet(notifications);
        // key not exists
        Versioned<Set<String>> versionedKeys = FutureUtils.result(ksReader.readAndWatch(keyConsumer));
        assertTrue("VersionedKeys : " + versionedKeys,
                ((LongVersion) versionedKeys.getVersion()).getLongVersion() > 0L);
        assertEquals(0, versionedKeys.getValue().size());
        assertTrue(ksReader.isWatcherSet());
        // keys should be cached
        assertEquals(versionedKeys, ksReader.getLocalValue());
        // no watch event should be issued
        Versioned<Set<String>> newVersionedKey = notifications.take();
        assertEquals(Occurred.CONCURRENTLY, newVersionedKey.getVersion().compare(versionedKeys.getVersion()));
        assertEquals(versionedKeys, newVersionedKey);
        versionedKeys = newVersionedKey;
        // create a key with ttl
        long leaseId = FutureUtils.result(etcdClient.getLeaseClient().grant(1)).getID();
        String value = RandomStringUtils.randomAlphabetic(32);
        ByteSequence valueBs = ByteSequence.fromString(value);
        FutureUtils.result(etcdClient.getKVClient().put(keyBs, valueBs,
                PutOption.newBuilder().withLeaseId(leaseId).build()));
        // we should get notified with updated key set
        newVersionedKey = notifications.take();
        assertEquals(Occurred.AFTER, newVersionedKey.getVersion().compare(versionedKeys.getVersion()));
        assertEquals(1, newVersionedKey.getValue().size());
        assertEquals(Sets.newHashSet(key), newVersionedKey.getValue());
        // local value should be changed
        assertEquals(newVersionedKey, ksReader.getLocalValue());
        versionedKeys = newVersionedKey;
        // the key will be deleted after TTL
        newVersionedKey = notifications.take();
        assertEquals(Occurred.AFTER, newVersionedKey.getVersion().compare(versionedKeys.getVersion()));
        assertEquals(0, newVersionedKey.getValue().size());
        // local value should be changed
        assertEquals(newVersionedKey, ksReader.getLocalValue());
    } finally {
        if (null != ksReader) {
            ksReader.close();
        }
    }
    assertNotNull(ksReader);
    assertFalse(ksReader.isWatcherSet());
}
From source file:org.apache.bookkeeper.metadata.etcd.helpers.KeySetReaderTest.java
@Test
public void testWatchKeySet() throws Exception {
    String prefix = RandomStringUtils.randomAlphabetic(16);
    ByteSequence beginKeyBs = ByteSequence.fromString(prefix + "-000");
    ByteSequence endKeyBs = ByteSequence.fromString(prefix + "-999");
    KeySetReader<String> ksReader = null;
    try {
        ksReader = new KeySetReader<>(etcdClient, BYTE_SEQUENCE_STRING_FUNCTION, beginKeyBs, endKeyBs);
        LinkedBlockingQueue<Versioned<Set<String>>> notifications = new LinkedBlockingQueue<>();
        Consumer<Versioned<Set<String>>> keyConsumer = consumeVersionedKeySet(notifications);
        // key not exists
        Versioned<Set<String>> versionedKeys = FutureUtils.result(ksReader.readAndWatch(keyConsumer));
        assertTrue("VersionedKeys : " + versionedKeys,
                ((LongVersion) versionedKeys.getVersion()).getLongVersion() > 0L);
        assertEquals(0, versionedKeys.getValue().size());
        assertTrue(ksReader.isWatcherSet());
        // keys should be cached
        assertEquals(versionedKeys, ksReader.getLocalValue());
        Versioned<Set<String>> newVersionedKey = notifications.take();
        assertEquals(Occurred.CONCURRENTLY, newVersionedKey.getVersion().compare(versionedKeys.getVersion()));
        assertEquals(versionedKeys, newVersionedKey);
        versionedKeys = newVersionedKey;
        Set<String> expectedKeySet = new HashSet<>();
        for (int i = 0; i < 20; i++) {
            // update a value
            String key = String.format("%s-%03d", prefix, i);
            String value = RandomStringUtils.randomAlphabetic(32);
            ByteSequence keyBs = ByteSequence.fromString(key);
            ByteSequence valueBs = ByteSequence.fromString(value);
            expectedKeySet.add(key);
            FutureUtils.result(etcdClient.getKVClient().put(keyBs, valueBs));
            // we should get notified with updated key set
            newVersionedKey = notifications.take();
            assertEquals(Occurred.AFTER, newVersionedKey.getVersion().compare(versionedKeys.getVersion()));
            assertEquals(expectedKeySet, newVersionedKey.getValue());
            // local value should be changed
            assertEquals(newVersionedKey, ksReader.getLocalValue());
            versionedKeys = newVersionedKey;
        }
        for (int i = 0; i < 20; i++) {
            // delete the key
            String key = String.format("%s-%03d", prefix, i);
            ByteSequence keyBs = ByteSequence.fromString(key);
            expectedKeySet.remove(key);
            FutureUtils.result(etcdClient.getKVClient().delete(keyBs));
            // we should get notified with updated key set
            newVersionedKey = notifications.take();
            assertEquals(Occurred.AFTER, newVersionedKey.getVersion().compare(versionedKeys.getVersion()));
            assertEquals(expectedKeySet, newVersionedKey.getValue());
            // local value should be changed
            assertEquals(newVersionedKey, ksReader.getLocalValue());
            versionedKeys = newVersionedKey;
        }
    } finally {
        if (null != ksReader) {
            ksReader.close();
        }
    }
    assertNotNull(ksReader);
    assertFalse(ksReader.isWatcherSet());
}
From source file:org.apache.bookkeeper.metadata.etcd.helpers.KeySetReaderTest.java
@Test
public void testWatchKeySetWithTTL() throws Exception {
    String prefix = RandomStringUtils.randomAlphabetic(16);
    ByteSequence beginKeyBs = ByteSequence.fromString(prefix + "-000");
    ByteSequence endKeyBs = ByteSequence.fromString(prefix + "-999");
    KeySetReader<String> ksReader = null;
    try {
        ksReader = new KeySetReader<>(etcdClient, BYTE_SEQUENCE_STRING_FUNCTION, beginKeyBs, endKeyBs);
        LinkedBlockingQueue<Versioned<Set<String>>> notifications = new LinkedBlockingQueue<>();
        Consumer<Versioned<Set<String>>> keyConsumer = consumeVersionedKeySet(notifications);
        // key not exists
        Versioned<Set<String>> versionedKeys = FutureUtils.result(ksReader.readAndWatch(keyConsumer));
        assertTrue("VersionedKeys : " + versionedKeys,
                ((LongVersion) versionedKeys.getVersion()).getLongVersion() > 0L);
        assertEquals(0, versionedKeys.getValue().size());
        assertTrue(ksReader.isWatcherSet());
        // keys should be cached
        assertEquals(versionedKeys, ksReader.getLocalValue());
        // no watch event should be issued
        Versioned<Set<String>> newVersionedKey = notifications.take();
        assertEquals(Occurred.CONCURRENTLY, newVersionedKey.getVersion().compare(versionedKeys.getVersion()));
        assertEquals(versionedKeys, newVersionedKey);
        versionedKeys = newVersionedKey;
        // create keys with ttl
        long leaseId = FutureUtils.result(etcdClient.getLeaseClient().grant(1)).getID();
        KeepAliveListener ka = etcdClient.getLeaseClient().keepAlive(leaseId);
        Set<String> expectedKeySet = new HashSet<>();
        for (int i = 0; i < 20; i++) {
            String key = String.format("%s-%03d", prefix, i);
            String value = RandomStringUtils.randomAlphabetic(32);
            ByteSequence keyBs = ByteSequence.fromString(key);
            ByteSequence valueBs = ByteSequence.fromString(value);
            expectedKeySet.add(key);
            FutureUtils.result(etcdClient.getKVClient().put(keyBs, valueBs,
                    PutOption.newBuilder().withLeaseId(leaseId).build()));
            // we should get notified with updated key set
            newVersionedKey = notifications.take();
            assertEquals(Occurred.AFTER, newVersionedKey.getVersion().compare(versionedKeys.getVersion()));
            assertEquals(expectedKeySet, newVersionedKey.getValue());
            // local value should be changed
            assertEquals(newVersionedKey, ksReader.getLocalValue());
            versionedKeys = newVersionedKey;
        }
        // stop keep alive; all the keys should be expired.
        ka.close();
        // all the keys will be deleted after TTL in same batch.
        newVersionedKey = notifications.take();
        // local value should be changed
        assertEquals(newVersionedKey, ksReader.getLocalValue());
        assertEquals(Occurred.AFTER, newVersionedKey.getVersion().compare(versionedKeys.getVersion()));
        assertTrue(newVersionedKey.getValue().isEmpty());
    } finally {
        if (null != ksReader) {
            ksReader.close();
        }
    }
    assertNotNull(ksReader);
    assertFalse(ksReader.isWatcherSet());
}
From source file:org.apache.distributedlog.admin.DistributedLogAdmin.java
private static Map<String, StreamCandidate> checkStreams(final Namespace namespace,
        final Collection<String> streams, final OrderedScheduler scheduler, final int concurrency)
        throws IOException {
    final LinkedBlockingQueue<String> streamQueue = new LinkedBlockingQueue<String>();
    streamQueue.addAll(streams);
    final Map<String, StreamCandidate> candidateMap = new ConcurrentSkipListMap<String, StreamCandidate>();
    final AtomicInteger numPendingStreams = new AtomicInteger(streams.size());
    final CountDownLatch doneLatch = new CountDownLatch(1);
    Runnable checkRunnable = new Runnable() {
        @Override
        public void run() {
            while (!streamQueue.isEmpty()) {
                String stream;
                try {
                    stream = streamQueue.take();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    break;
                }
                StreamCandidate candidate;
                try {
                    LOG.info("Checking stream {}.", stream);
                    candidate = checkStream(namespace, stream, scheduler);
                    LOG.info("Checked stream {} - {}.", stream, candidate);
                } catch (Throwable e) {
                    LOG.error("Error on checking stream {} : ", stream, e);
                    doneLatch.countDown();
                    break;
                }
                if (null != candidate) {
                    candidateMap.put(stream, candidate);
                }
                if (numPendingStreams.decrementAndGet() == 0) {
                    doneLatch.countDown();
                }
            }
        }
    };
    Thread[] threads = new Thread[concurrency];
    for (int i = 0; i < concurrency; i++) {
        threads[i] = new Thread(checkRunnable, "check-thread-" + i);
        threads[i].start();
    }
    try {
        doneLatch.await();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }
    if (numPendingStreams.get() != 0) {
        throw new IOException(numPendingStreams.get() + " streams left w/o checked");
    }
    for (int i = 0; i < concurrency; i++) {
        threads[i].interrupt();
        try {
            threads[i].join();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }
    return candidateMap;
}
From source file:org.apache.hedwig.server.proxy.HedwigProxy.java
public void start() throws InterruptedException {
    final LinkedBlockingQueue<Boolean> queue = new LinkedBlockingQueue<Boolean>();
    new Thread(tg, new Runnable() {
        @Override
        public void run() {
            client = new HedwigClient(cfg);
            ThreadFactoryBuilder tfb = new ThreadFactoryBuilder();
            serverSocketChannelFactory = new NioServerSocketChannelFactory(
                    Executors.newCachedThreadPool(tfb.setNameFormat("HedwigProxy-NIOBoss-%d").build()),
                    Executors.newCachedThreadPool(tfb.setNameFormat("HedwigProxy-NIOWorker-%d").build()));
            initializeHandlers();
            initializeNetty();
            queue.offer(true);
        }
    }).start();
    queue.take();
}
From source file:org.commoncrawl.service.listcrawler.CrawlHistoryManager.java
private static void launchInTestMode() {
    File baseTestDir = new File("/tmp/logManagerTest");
    FileUtils.recursivelyDeleteFile(baseTestDir);
    baseTestDir.mkdir();
    File remoteDir = new File(baseTestDir, "remote");
    File localDir = new File(baseTestDir, "local");
    remoteDir.mkdir();
    localDir.mkdir();
    final TreeMap<String, URLFP> urlToFPMap = new TreeMap<String, URLFP>();
    final TreeMap<URLFP, String> urlFPToString = new TreeMap<URLFP, String>();
    Set<String> list1 = Sets.newHashSet(urlList1);
    Set<String> list2 = Sets.newHashSet(urlList2);
    final Set<String> combined = Sets.union(list1, list2);
    Set<String> difference = Sets.difference(list1, list2);
    final Set<String> completedURLS = new HashSet<String>();
    for (String url : combined) {
        URLFP fingerprint = URLUtils.getURLFPFromURL(url, true);
        urlToFPMap.put(url, fingerprint);
        urlFPToString.put(fingerprint, url);
    }
    File testInputFile1 = new File(localDir, "INPUT_LIST-" + System.currentTimeMillis());
    File testInputFile2 = new File(localDir, "INPUT_LIST-" + (System.currentTimeMillis() + 1));
    try {
        generateTestURLFile(testInputFile1, urlList1);
        generateTestURLFile(testInputFile2, urlList2);
        FileSystem localFileSystem = FileSystem.getLocal(CrawlEnvironment.getHadoopConfig());
        EventLoop eventLoop = new EventLoop();
        eventLoop.start();
        final CrawlHistoryManager logManager = new CrawlHistoryManager(localFileSystem,
                new Path(remoteDir.getAbsolutePath()), localDir, eventLoop, 0);
        final LinkedBlockingQueue<ProxyCrawlHistoryItem> queue = new LinkedBlockingQueue<ProxyCrawlHistoryItem>();
        final Semaphore initialListComplete = new Semaphore(0);
        logManager.startQueueLoaderThread(new CrawlQueueLoader() {
            @Override
            public void queueURL(URLFP urlfp, String url) {
                ProxyCrawlHistoryItem item = new ProxyCrawlHistoryItem();
                item.setOriginalURL(url);
                queue.add(item);
            }

            @Override
            public void flush() {
                // TODO Auto-generated method stub
            }
        });
        Thread queueTestThread = new Thread(new Runnable() {
            @Override
            public void run() {
                while (true) {
                    try {
                        ProxyCrawlHistoryItem item = queue.take();
                        if (item.getOriginalURL().length() == 0) {
                            break;
                        } else {
                            System.out.println("Got:" + item.getOriginalURL());
                            CrawlURL urlObject = new CrawlURL();
                            Assert.assertTrue(!completedURLS.contains(item.getOriginalURL()));
                            completedURLS.add(item.getOriginalURL());
                            urlObject.setLastAttemptResult((byte) CrawlURL.CrawlResult.SUCCESS);
                            urlObject.setUrl(item.getOriginalURL());
                            urlObject.setResultCode(200);
                            logManager.crawlComplete(urlObject);
                            if (completedURLS.equals(combined)) {
                                System.out.println("Hit Trigger URL. Releasing InitialListComplete Sempahore");
                                initialListComplete.release(1);
                            }
                        }
                    } catch (InterruptedException e) {
                    }
                }
            }
        });
        queueTestThread.start();
        logManager.loadList(testInputFile1, 0);
        logManager.loadList(testInputFile2, 0);
        System.out.println("Waiting for Initial List to Complete");
        initialListComplete.acquireUninterruptibly();
        System.out.println("Woke Up");
        try {
            eventLoop.getEventThread().join();
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    } catch (IOException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }
}
From source file:org.commoncrawl.service.pagerank.slave.PageRankUtils.java
public static void calculateRank(final Configuration conf, final FileSystem fs, final PRValueMap valueMap,
        final File jobLocalDir, final String jobWorkPath, final int nodeIndex, final int slaveCount,
        final int iterationNumber, final SuperDomainFilter superDomainFilter,
        final ProgressAndCancelCheckCallback progressAndCancelCallback) throws IOException {
    final LinkedBlockingQueue<CalculateRankQueueItem> readAheadQueue =
            new LinkedBlockingQueue<CalculateRankQueueItem>(20);
    // build stream vector ...
    Vector<Path> streamVector = buildCalculationInputStreamVector(jobLocalDir, jobWorkPath, nodeIndex,
            slaveCount, iterationNumber);
    // construct a reader ...
    final SortedPRInputReader reader = new SortedPRInputReader(conf, fs, streamVector, true);
    Thread readerThread = new Thread(new Runnable() {
        @Override
        public void run() {
            IOException exceptionOut = null;
            try {
                TargetAndSources target = null;
                while ((target = reader.readNextTarget()) != null) {
                    try {
                        readAheadQueue.put(new CalculateRankQueueItem(target));
                    } catch (InterruptedException e) {
                    }
                }
            } catch (IOException e) {
                LOG.error(CCStringUtils.stringifyException(e));
                exceptionOut = e;
            } finally {
                if (reader != null) {
                    reader.close();
                }
            }
            try {
                readAheadQueue.put(new CalculateRankQueueItem(exceptionOut));
            } catch (InterruptedException e1) {
            }
        }
    });
    readerThread.start();
    int failedUpdates = 0;
    int totalUpdates = 0;
    long iterationStart = System.currentTimeMillis();
    boolean cancelled = false;
    while (!cancelled) {
        CalculateRankQueueItem queueItem = null;
        try {
            queueItem = readAheadQueue.take();
        } catch (InterruptedException e) {
        }
        if (queueItem._next != null) {
            totalUpdates++;
            //LOG.info("Target: DomainHash:" + target.target.getDomainHash() + " URLHash:"
            //    + target.target.getUrlHash() + " ShardIdx:"
            //    + ((target.target.hashCode() & Integer.MAX_VALUE) % CrawlEnvironment.PR_NUMSLAVES));
            // now accumulate rank from stream into value map
            if (!accumulateRank(valueMap, queueItem._next, superDomainFilter)) {
                failedUpdates++;
                LOG.error("**TotalUpdates:" + totalUpdates + " Failed Updates:" + failedUpdates);
            }
            if ((totalUpdates + failedUpdates) % 10000 == 0) {
                float percentComplete = (float) reader._totalBytesRead / (float) reader._totalBytesToRead;
                if (progressAndCancelCallback != null) {
                    cancelled = progressAndCancelCallback.updateProgress(percentComplete);
                    if (cancelled) {
                        LOG.info("Cancel check callback returned true");
                    }
                }
                long timeEnd = System.currentTimeMillis();
                int milliseconds = (int) (timeEnd - iterationStart);
                //LOG.info("Accumulate PR for 10000 Items Took:" + milliseconds
                //    + " Milliseconds QueueSize:" + readAheadQueue.size());
                iterationStart = System.currentTimeMillis();
            }
        } else {
            if (queueItem._e != null) {
                LOG.error(CCStringUtils.stringifyException(queueItem._e));
                throw queueItem._e;
            } else {
                // now finally pagerank value in value map ...
                valueMap.finalizePageRank();
            }
            break;
        }
    }
    try {
        readerThread.join();
    } catch (InterruptedException e) {
    }
}
From source file:org.commoncrawl.util.HDFSBlockTransferUtility.java
public static void main(String[] args) {
    final String transferFromDisk = args[0];
    final String transferToDisks[] = args[1].split(",");
    final LinkedBlockingQueue<String> queues[] = new LinkedBlockingQueue[transferToDisks.length];
    final Semaphore waitSemaphore = new Semaphore(-(transferToDisks.length - 1));
    for (int i = 0; i < transferToDisks.length; ++i) {
        queues[i] = new LinkedBlockingQueue<String>();
    }
    File transferSource = new File(transferFromDisk);
    for (File transferFile : transferSource.listFiles()) {
        if (transferFile.isDirectory()) {
            int partition = Math.abs(transferFile.getName().hashCode() % transferToDisks.length);
            try {
                queues[partition].put(transferFile.getAbsolutePath());
            } catch (InterruptedException e) {
            }
        } else {
            try {
                doCopyFile(transferFile, new File(transferToDisks[0], transferFile.getName()), true);
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }
    Thread threads[] = new Thread[transferToDisks.length];
    for (int i = 0; i < transferToDisks.length; ++i) {
        final int threadIdx = i;
        try {
            queues[threadIdx].put("");
        } catch (InterruptedException e1) {
        }
        threads[i] = new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    File transferToDisk = new File(transferToDisks[threadIdx]);
                    LinkedBlockingQueue<String> queue = queues[threadIdx];
                    while (true) {
                        try {
                            String nextDir = queue.take();
                            if (nextDir.length() == 0) {
                                break;
                            } else {
                                File sourceDir = new File(nextDir);
                                File targetDir = new File(transferToDisk, sourceDir.getName());
                                try {
                                    copyFiles(sourceDir, targetDir, true);
                                } catch (IOException e) {
                                    e.printStackTrace();
                                }
                            }
                        } catch (InterruptedException e) {
                        }
                    }
                } finally {
                    waitSemaphore.release();
                }
            }
        });
        threads[i].start();
    }
    System.out.println("Waiting for Worker Threads");
    try {
        waitSemaphore.acquire();
    } catch (InterruptedException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }
    System.out.println("Worker Threads Dead");
}