List of usage examples for java.util.concurrent.Semaphore
public Semaphore(int permits)
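Creates a Semaphore with the given number of permits and nonfair fairness setting; the permit count may be negative, in which case releases must occur before any acquire succeeds. Before the real-world examples below, here is a minimal, self-contained sketch of the constructor in use; the class name, pool size of 3, and thread names are illustrative and not taken from the examples that follow.

import java.util.concurrent.Semaphore;

public class SemaphoreConstructorExample {
    public static void main(String[] args) {
        // A semaphore created with 3 permits allows at most 3 concurrent holders.
        final Semaphore permits = new Semaphore(3);

        Runnable task = () -> {
            try {
                permits.acquire(); // blocks while all 3 permits are taken
                System.out.println(Thread.currentThread().getName() + " acquired a permit");
                Thread.sleep(100); // simulate work in the guarded section
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            } finally {
                permits.release(); // always return the permit
            }
        };

        for (int i = 0; i < 10; i++) {
            new Thread(task, "worker-" + i).start();
        }
    }
}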
From source file:org.commoncrawl.util.HDFSBlockTransferUtility.java
public static void main(String[] args) {
    final String transferFromDisk = args[0];
    final String transferToDisks[] = args[1].split(",");
    final LinkedBlockingQueue<String> queues[] = new LinkedBlockingQueue[transferToDisks.length];
    // Negative initial permit count: acquire() succeeds only after every worker thread has released.
    final Semaphore waitSemaphore = new Semaphore(-(transferToDisks.length - 1));

    for (int i = 0; i < transferToDisks.length; ++i) {
        queues[i] = new LinkedBlockingQueue<String>();
    }

    File transferSource = new File(transferFromDisk);
    for (File transferFile : transferSource.listFiles()) {
        if (transferFile.isDirectory()) {
            int partition = Math.abs(transferFile.getName().hashCode() % transferToDisks.length);
            try {
                queues[partition].put(transferFile.getAbsolutePath());
            } catch (InterruptedException e) {
            }
        } else {
            try {
                doCopyFile(transferFile, new File(transferToDisks[0], transferFile.getName()), true);
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }

    Thread threads[] = new Thread[transferToDisks.length];
    for (int i = 0; i < transferToDisks.length; ++i) {
        final int threadIdx = i;
        try {
            queues[threadIdx].put("");
        } catch (InterruptedException e1) {
        }

        threads[i] = new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    File transferToDisk = new File(transferToDisks[threadIdx]);
                    LinkedBlockingQueue<String> queue = queues[threadIdx];
                    while (true) {
                        try {
                            String nextDir = queue.take();
                            if (nextDir.length() == 0) {
                                break;
                            } else {
                                File sourceDir = new File(nextDir);
                                File targetDir = new File(transferToDisk, sourceDir.getName());
                                try {
                                    copyFiles(sourceDir, targetDir, true);
                                } catch (IOException e) {
                                    e.printStackTrace();
                                }
                            }
                        } catch (InterruptedException e) {
                        }
                    }
                } finally {
                    waitSemaphore.release();
                }
            }
        });
        threads[i].start();
    }

    System.out.println("Waiting for Worker Threads");
    try {
        waitSemaphore.acquire();
    } catch (InterruptedException e) {
        e.printStackTrace();
    }
    System.out.println("Worker Threads Dead");
}
From source file:co.paralleluniverse.photon.Photon.java
public static void main(final String[] args) throws InterruptedException, IOException {
    final Options options = new Options();
    options.addOption("rate", true, "Requests per second (default " + rateDefault + ")");
    options.addOption("duration", true,
            "Minimum test duration in seconds: will wait for <duration> * <rate> requests to terminate or, if progress check enabled, no progress after <duration> (default "
                    + durationDefault + ")");
    options.addOption("maxconnections", true,
            "Maximum number of open connections (default " + maxConnectionsDefault + ")");
    options.addOption("timeout", true, "Connection and read timeout in millis (default " + timeoutDefault + ")");
    options.addOption("print", true,
            "Print cycle in millis, 0 to disable intermediate statistics (default " + printCycleDefault + ")");
    options.addOption("check", true,
            "Progress check cycle in millis, 0 to disable progress check (default " + checkCycleDefault + ")");
    options.addOption("stats", false, "Print full statistics when finish (default false)");
    options.addOption("minmax", false, "Print min/mean/stddev/max stats when finish (default false)");
    options.addOption("name", true, "Test name to print in the statistics (default '" + testNameDefault + "')");
    options.addOption("help", false, "Print help");

    try {
        final CommandLine cmd = new BasicParser().parse(options, args);
        final String[] ar = cmd.getArgs();
        if (cmd.hasOption("help") || ar.length != 1)
            printUsageAndExit(options);

        final String url = ar[0];
        final int timeout = Integer.parseInt(cmd.getOptionValue("timeout", timeoutDefault));
        final int maxConnections = Integer.parseInt(cmd.getOptionValue("maxconnections", maxConnectionsDefault));
        final int duration = Integer.parseInt(cmd.getOptionValue("duration", durationDefault));
        final int printCycle = Integer.parseInt(cmd.getOptionValue("print", printCycleDefault));
        final int checkCycle = Integer.parseInt(cmd.getOptionValue("check", checkCycleDefault));
        final String testName = cmd.getOptionValue("name", testNameDefault);
        final int rate = Integer.parseInt(cmd.getOptionValue("rate", rateDefault));

        final MetricRegistry metrics = new MetricRegistry();
        final Meter requestMeter = metrics.meter("request");
        final Meter responseMeter = metrics.meter("response");
        final Meter errorsMeter = metrics.meter("errors");

        final Logger log = LoggerFactory.getLogger(Photon.class);
        final ConcurrentHashMap<String, AtomicInteger> errors = new ConcurrentHashMap<>();
        final HttpGet request = new HttpGet(url);
        final StripedTimeSeries<Long> sts = new StripedTimeSeries<>(30000, false);
        final StripedHistogram sh = new StripedHistogram(60000, 5);

        log.info("name: " + testName + " url:" + url + " rate:" + rate + " duration:" + duration
                + " maxconnections:" + maxConnections + ", " + "timeout:" + timeout);

        final DefaultConnectingIOReactor ioreactor = new DefaultConnectingIOReactor(IOReactorConfig.custom()
                .setConnectTimeout(timeout).setIoThreadCount(10).setSoTimeout(timeout).build());

        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
            final List<ExceptionEvent> events = ioreactor.getAuditLog();
            if (events != null)
                events.stream().filter(event -> event != null).forEach(event -> {
                    System.err.println("Apache Async HTTP Client I/O Reactor Error Time: " + event.getTimestamp());
                    //noinspection ThrowableResultOfMethodCallIgnored
                    if (event.getCause() != null)
                        //noinspection ThrowableResultOfMethodCallIgnored
                        event.getCause().printStackTrace();
                });
            if (cmd.hasOption("stats"))
                printFinishStatistics(errorsMeter, sts, sh, testName);
            if (!errors.keySet().isEmpty())
                errors.entrySet().stream()
                        .forEach(p -> log.info(testName + " " + p.getKey() + " " + p.getValue() + "ms"));
            System.out.println(
                    testName + " responseTime(90%): " + sh.getHistogramData().getValueAtPercentile(90) + "ms");
            if (cmd.hasOption("minmax")) {
                final HistogramData hd = sh.getHistogramData();
                System.out.format("%s %8s%8s%8s%8s\n", testName, "min", "mean", "sd", "max");
                System.out.format("%s %8d%8.2f%8.2f%8d\n", testName, hd.getMinValue(), hd.getMean(),
                        hd.getStdDeviation(), hd.getMaxValue());
            }
        }));

        final PoolingNHttpClientConnectionManager mngr = new PoolingNHttpClientConnectionManager(ioreactor);
        mngr.setDefaultMaxPerRoute(maxConnections);
        mngr.setMaxTotal(maxConnections);
        final CloseableHttpAsyncClient ahc = HttpAsyncClientBuilder.create().setConnectionManager(mngr)
                .setDefaultRequestConfig(RequestConfig.custom().setLocalAddress(null).build()).build();

        try (final CloseableHttpClient client = new FiberHttpClient(ahc)) {
            final int num = duration * rate;
            final CountDownLatch cdl = new CountDownLatch(num);
            // The semaphore caps the number of in-flight requests at maxConnections.
            final Semaphore sem = new Semaphore(maxConnections);
            final RateLimiter rl = RateLimiter.create(rate);

            spawnStatisticsThread(printCycle, cdl, log, requestMeter, responseMeter, errorsMeter, testName);

            for (int i = 0; i < num; i++) {
                rl.acquire();
                if (sem.availablePermits() == 0)
                    log.debug("Maximum connections count reached, waiting...");
                sem.acquireUninterruptibly();

                new Fiber<Void>(() -> {
                    requestMeter.mark();
                    final long start = System.nanoTime();
                    try {
                        try (final CloseableHttpResponse ignored = client.execute(request)) {
                            responseMeter.mark();
                        } catch (final Throwable t) {
                            markError(errorsMeter, errors, t);
                        }
                    } catch (final Throwable t) {
                        markError(errorsMeter, errors, t);
                    } finally {
                        final long now = System.nanoTime();
                        final long millis = TimeUnit.NANOSECONDS.toMillis(now - start);
                        sts.record(start, millis);
                        sh.recordValue(millis);
                        sem.release();
                        cdl.countDown();
                    }
                }).start();
            }
            spawnProgressCheckThread(log, duration, checkCycle, cdl);
            cdl.await();
        }
    } catch (final ParseException ex) {
        System.err.println("Parsing failed. Reason: " + ex.getMessage());
    }
}
From source file:org.apache.pulsar.testclient.ManagedLedgerWriter.java
public static void main(String[] args) throws Exception {
    final Arguments arguments = new Arguments();
    JCommander jc = new JCommander(arguments);
    jc.setProgramName("pulsar-perf-producer");

    try {
        jc.parse(args);
    } catch (ParameterException e) {
        System.out.println(e.getMessage());
        jc.usage();
        System.exit(-1);
    }

    if (arguments.help) {
        jc.usage();
        System.exit(-1);
    }

    arguments.testTime = TimeUnit.SECONDS.toMillis(arguments.testTime);

    // Dump config variables
    ObjectMapper m = new ObjectMapper();
    ObjectWriter w = m.writerWithDefaultPrettyPrinter();
    log.info("Starting Pulsar managed-ledger perf writer with config: {}", w.writeValueAsString(arguments));

    byte[] payloadData = new byte[arguments.msgSize];
    ByteBuf payloadBuffer = PooledByteBufAllocator.DEFAULT.directBuffer(arguments.msgSize);
    payloadBuffer.writerIndex(arguments.msgSize);

    // Now processing command line arguments
    String managedLedgerPrefix = "test-" + DigestUtils.sha1Hex(UUID.randomUUID().toString()).substring(0, 5);

    ClientConfiguration bkConf = new ClientConfiguration();
    bkConf.setUseV2WireProtocol(true);
    bkConf.setAddEntryTimeout(30);
    bkConf.setReadEntryTimeout(30);
    bkConf.setThrottleValue(0);
    bkConf.setNumChannelsPerBookie(arguments.maxConnections);
    bkConf.setZkServers(arguments.zookeeperServers);

    ManagedLedgerFactoryConfig mlFactoryConf = new ManagedLedgerFactoryConfig();
    mlFactoryConf.setMaxCacheSize(0);
    ManagedLedgerFactory factory = new ManagedLedgerFactoryImpl(bkConf, mlFactoryConf);

    ManagedLedgerConfig mlConf = new ManagedLedgerConfig();
    mlConf.setEnsembleSize(arguments.ensembleSize);
    mlConf.setWriteQuorumSize(arguments.writeQuorum);
    mlConf.setAckQuorumSize(arguments.ackQuorum);
    mlConf.setMinimumRolloverTime(10, TimeUnit.MINUTES);
    mlConf.setMetadataEnsembleSize(arguments.ensembleSize);
    mlConf.setMetadataWriteQuorumSize(arguments.writeQuorum);
    mlConf.setMetadataAckQuorumSize(arguments.ackQuorum);
    mlConf.setDigestType(arguments.digestType);
    mlConf.setMaxSizePerLedgerMb(2048);

    List<CompletableFuture<ManagedLedger>> futures = new ArrayList<>();

    for (int i = 0; i < arguments.numManagedLedgers; i++) {
        String name = String.format("%s-%03d", managedLedgerPrefix, i);
        CompletableFuture<ManagedLedger> future = new CompletableFuture<>();
        futures.add(future);
        factory.asyncOpen(name, mlConf, new OpenLedgerCallback() {
            @Override
            public void openLedgerComplete(ManagedLedger ledger, Object ctx) {
                future.complete(ledger);
            }

            @Override
            public void openLedgerFailed(ManagedLedgerException exception, Object ctx) {
                future.completeExceptionally(exception);
            }
        }, null);
    }

    List<ManagedLedger> managedLedgers = futures.stream().map(CompletableFuture::join)
            .collect(Collectors.toList());

    log.info("Created {} managed ledgers", managedLedgers.size());

    Runtime.getRuntime().addShutdownHook(new Thread() {
        public void run() {
            printAggregatedStats();
        }
    });

    Collections.shuffle(managedLedgers);
    AtomicBoolean isDone = new AtomicBoolean();

    List<List<ManagedLedger>> managedLedgersPerThread = Lists.partition(managedLedgers,
            Math.max(1, managedLedgers.size() / arguments.numThreads));

    for (int i = 0; i < arguments.numThreads; i++) {
        List<ManagedLedger> managedLedgersForThisThread = managedLedgersPerThread.get(i);
        int nunManagedLedgersForThisThread = managedLedgersForThisThread.size();
        long numMessagesForThisThread = arguments.numMessages / arguments.numThreads;
        int maxOutstandingForThisThread = arguments.maxOutstanding;

        executor.submit(() -> {
            try {
                final double msgRate = arguments.msgRate / (double) arguments.numThreads;
                final RateLimiter rateLimiter = RateLimiter.create(msgRate);

                // Acquire 1 sec worth of messages to have a slower ramp-up
                rateLimiter.acquire((int) msgRate);
                final long startTime = System.currentTimeMillis();

                // The semaphore bounds the number of outstanding (unacknowledged) add-entry operations.
                final Semaphore semaphore = new Semaphore(maxOutstandingForThisThread);

                final AddEntryCallback addEntryCallback = new AddEntryCallback() {
                    @Override
                    public void addComplete(Position position, Object ctx) {
                        long sendTime = (Long) (ctx);
                        messagesSent.increment();
                        bytesSent.add(payloadData.length);

                        long latencyMicros = NANOSECONDS.toMicros(System.nanoTime() - sendTime);
                        recorder.recordValue(latencyMicros);
                        cumulativeRecorder.recordValue(latencyMicros);

                        semaphore.release();
                    }

                    @Override
                    public void addFailed(ManagedLedgerException exception, Object ctx) {
                        log.warn("Write error on message", exception);
                        System.exit(-1);
                    }
                };

                // Send messages on all topics/producers
                long totalSent = 0;
                while (true) {
                    for (int j = 0; j < nunManagedLedgersForThisThread; j++) {
                        if (arguments.testTime > 0) {
                            if (System.currentTimeMillis() - startTime > arguments.testTime) {
                                log.info("------------------- DONE -----------------------");
                                printAggregatedStats();
                                isDone.set(true);
                                Thread.sleep(5000);
                                System.exit(0);
                            }
                        }

                        if (numMessagesForThisThread > 0) {
                            if (totalSent++ >= numMessagesForThisThread) {
                                log.info("------------------- DONE -----------------------");
                                printAggregatedStats();
                                isDone.set(true);
                                Thread.sleep(5000);
                                System.exit(0);
                            }
                        }

                        semaphore.acquire();
                        rateLimiter.acquire();

                        final long sendTime = System.nanoTime();
                        managedLedgersForThisThread.get(j).asyncAddEntry(payloadBuffer, addEntryCallback,
                                sendTime);
                    }
                }
            } catch (Throwable t) {
                log.error("Got error", t);
            }
        });
    }

    // Print report stats
    long oldTime = System.nanoTime();
    Histogram reportHistogram = null;

    while (true) {
        try {
            Thread.sleep(10000);
        } catch (InterruptedException e) {
            break;
        }

        if (isDone.get()) {
            break;
        }

        long now = System.nanoTime();
        double elapsed = (now - oldTime) / 1e9;

        double rate = messagesSent.sumThenReset() / elapsed;
        double throughput = bytesSent.sumThenReset() / elapsed / 1024 / 1024 * 8;

        reportHistogram = recorder.getIntervalHistogram(reportHistogram);

        log.info(
                "Throughput produced: {} msg/s --- {} Mbit/s --- Latency: mean: {} ms - med: {} - 95pct: {} - 99pct: {} - 99.9pct: {} - 99.99pct: {} - Max: {}",
                throughputFormat.format(rate), throughputFormat.format(throughput),
                dec.format(reportHistogram.getMean() / 1000.0),
                dec.format(reportHistogram.getValueAtPercentile(50) / 1000.0),
                dec.format(reportHistogram.getValueAtPercentile(95) / 1000.0),
                dec.format(reportHistogram.getValueAtPercentile(99) / 1000.0),
                dec.format(reportHistogram.getValueAtPercentile(99.9) / 1000.0),
                dec.format(reportHistogram.getValueAtPercentile(99.99) / 1000.0),
                dec.format(reportHistogram.getMaxValue() / 1000.0));

        reportHistogram.reset();
        oldTime = now;
    }

    factory.shutdown();
}
From source file:Main.java
/**
 * Runs each runnable in a new thread. This method blocks until all runnables are complete.
 * Before any runnables are run, we also wait until the allocated thread has run at least once.
 * This is done to increase the randomness in the order of thread execution.
 */
public static void startMultipleThreadsAndWaitUntilComplete(final List<Runnable> runnables) throws Exception {
    final Semaphore competingThreadsStarted = new Semaphore(0); // Number of threads for runnables started.
    final Semaphore competingThreadsToRelease = new Semaphore(0); // Acquired by runnable threads; released once all runnables have been run once.
    final Semaphore competingThreadsCompleted = new Semaphore(0); // Number of runnable threads completed.

    for (int i = 0; i < runnables.size(); i++) {
        final int runnableIndex = i;
        new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    // Notify semaphore that this thread has been started.
                    competingThreadsStarted.release(1);

                    // Once all threads have notified the competingThreadsStarted semaphore,
                    // competingThreadsToRelease will be released and we will continue.
                    competingThreadsToRelease.acquire(1);

                    // Increases randomness of thread execution order.
                    Thread.sleep(1);

                    runnables.get(runnableIndex).run();

                    // Thread has completed running the provided runnable.
                    competingThreadsCompleted.release(1);
                } catch (final InterruptedException e) {
                    e.printStackTrace();
                }
            }
        }).start();
    }

    // Only proceed once all threads have at least started running once.
    competingThreadsStarted.acquire(runnables.size());
    // Release all threads.
    competingThreadsToRelease.release(runnables.size());
    // Wait until all threads have completed before returning.
    competingThreadsCompleted.acquire(runnables.size());
}
From source file:Main.java
/**
 * Locks a semaphore for handling multi-threading with insert/delete/update methods.
 *
 * @param key key for inserting the locked semaphore in a hashmap.
 * @return true if the semaphore was acquired, false if the thread was interrupted.
 */
public static boolean lockChat(String key) {
    Semaphore semaphore = new Semaphore(1);
    Semaphore oldSemaphore = semaphoreMap.putIfAbsent(key, semaphore);
    if (oldSemaphore != null) {
        semaphore = oldSemaphore;
    }
    //semaphore.tryAcquire(30, TimeUnit.SECONDS);
    try {
        semaphore.acquire();
        return true;
    } catch (InterruptedException ignored) {
        Log.w(TAG, "Could not acquire chat: " + key);
        return false;
    }
}
From source file:org.commoncrawl.util.MapReduceJobStatsWriter.java
public static void main(String[] args) {
    LOG.info("Initializing Hadoop Config");

    Configuration conf = new Configuration();

    conf.addResource("nutch-default.xml");
    conf.addResource("nutch-site.xml");
    conf.addResource("hadoop-default.xml");
    conf.addResource("hadoop-site.xml");
    conf.addResource("commoncrawl-default.xml");
    conf.addResource("commoncrawl-site.xml");

    CrawlEnvironment.setHadoopConfig(conf);
    CrawlEnvironment.setDefaultHadoopFSURI("hdfs://ccn01:9000/");

    // test the stats Writer ...
    try {
        LOG.info("Opening Stats Writer");
        MapReduceJobStatsWriter<IntWritable, Text> statsWriter = new MapReduceJobStatsWriter<IntWritable, Text>(
                CrawlEnvironment.getDefaultFileSystem(), conf, IntWritable.class, Text.class, "test", "group1",
                12345L);

        LOG.info("Writing Entries");
        for (int i = 0; i < 1000; ++i) {
            statsWriter.appendLogEntry(new IntWritable(i), new Text("Log Entry #" + i));
        }

        LOG.info("Flushing / Closing");
        // Zero-permit semaphore: the main thread blocks until the close callback releases it.
        final Semaphore blockingSempahore = new Semaphore(0);
        statsWriter.close(new Callback() {
            @Override
            public void execute() {
                LOG.info("Completion Callback Triggered");
                blockingSempahore.release();
            }
        });

        LOG.info("Waiting on Semaphore");
        blockingSempahore.acquireUninterruptibly();
        LOG.info("Acquired Semaphore");
        LOG.info("Closed");

        Path hdfsPath = new Path(Environment.HDFS_LOGCOLLECTOR_BASEDIR,
                "test" + "/" + "group1" + "/" + Long.toString(12345L));

        LOG.info("Opening Reader");
        SequenceFile.Reader reader = new SequenceFile.Reader(CrawlEnvironment.getDefaultFileSystem(), hdfsPath,
                conf);
        IntWritable key = new IntWritable();
        Text value = new Text();
        while (reader.next(key, value)) {
            LOG.info("Key:" + key.get() + " Value:" + value.toString());
        }
        reader.close();
    } catch (IOException e) {
        LOG.error(CCStringUtils.stringifyException(e));
    }
}
From source file:org.marekasf.troughput.XYHistogramChart.java
public static void display(final AdaptiveHistogram h, final String title) {
    final XYHistogramChart demo = new XYHistogramChart(h, title);
    demo.pack();
    RefineryUtilities.centerFrameOnScreen(demo);
    demo.setVisible(true);

    // Zero-permit semaphore: acquire() blocks until the window-closing listener releases it.
    final Semaphore semaphore = new Semaphore(0);

    demo.addWindowListener(new WindowAdapter() {
        @Override
        public void windowClosing(final WindowEvent we) {
            semaphore.release();
        }
    });

    try {
        semaphore.acquire();
    } catch (final InterruptedException e) {
        //
    }
}
From source file:Main.java
public void runTest() throws Exception {
    ThreadPoolExecutor tp = new ThreadPoolExecutor(1, 1, 60L, TimeUnit.SECONDS,
            new SynchronousQueue<Runnable>());
    tp.setRejectedExecutionHandler(
            (Runnable r, ThreadPoolExecutor executor) -> System.out.println("Task rejected: " + r));

    Semaphore oneTaskDone = new Semaphore(0);
    tp.execute(() -> {
        System.out.println("Sleeping");
        try {
            Thread.sleep(300);
        } catch (Exception e) {
            e.printStackTrace();
        }
        System.out.println("Done sleeping");
        oneTaskDone.release();
    });

    tp.execute(new Runnable() {
        @Override
        public void run() {
            System.out.println("Never happends");
        }

        @Override
        public String toString() {
            return "Rejected Runnable";
        }
    });

    oneTaskDone.acquire();
    tp.execute(() -> System.out.println("Running"));
    tp.shutdown();
    tp.awaitTermination(100, TimeUnit.MILLISECONDS);
    System.out.println("Finished");
}
From source file:org.commoncrawl.service.parser.client.Dispatcher.java
public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    CrawlEnvironment.setHadoopConfig(conf);

    String baseURL = "http://unknown.com/";
    if (args.length != 0) {
        baseURL = args[0];
    }

    URL baseURLObj;
    try {
        baseURLObj = new URL(baseURL);
    } catch (MalformedURLException e2) {
        throw new IOException("Invalid Base Link");
    }

    final URL finalBaseURL = (baseURLObj != null) ? baseURLObj : null;
    final DataOutputBuffer headerBuffer = new DataOutputBuffer();
    final DataOutputBuffer contentBuffer = new DataOutputBuffer();

    try {
        ByteStreams.readBytes(new InputSupplier<InputStream>() {
            @Override
            public InputStream getInput() throws IOException {
                return System.in;
            }
        }, new ByteProcessor<Long>() {
            @Override
            public Long getResult() {
                return 0L;
            }

            int currLineCharCount = 0;
            boolean processingHeaders = true;

            @Override
            public boolean processBytes(byte[] buf, int start, int length) throws IOException {
                if (processingHeaders) {
                    int current = start;
                    int end = current + length;
                    while (processingHeaders && current != end) {
                        if (buf[current] != '\r' && buf[current] != '\n') {
                            currLineCharCount++;
                        } else if (buf[current] == '\n') {
                            if (currLineCharCount == 0) {
                                headerBuffer.write(buf, start, current - start + 1);
                                processingHeaders = false;
                            }
                            currLineCharCount = 0;
                        }
                        current++;
                    }
                    if (processingHeaders) {
                        headerBuffer.write(buf, start, length);
                    } else {
                        length -= current - start;
                        start = current;
                    }
                }
                if (!processingHeaders) {
                    contentBuffer.write(buf, start, length);
                }
                return true;
            }
        });

        LOG.info("HEADER LEN:" + headerBuffer.getLength());
        // System.out.println(new String(headerBuffer.getData(),0,headerBuffer.getLength(),Charset.forName("UTF-8")));
        LOG.info("CONTENT LEN:" + contentBuffer.getLength());
        // System.out.println(new String(contentBuffer.getData(),0,contentBuffer.getLength(),Charset.forName("UTF-8")));

        // decode header bytes ...
        String header = "";
        if (headerBuffer.getLength() != 0) {
            try {
                header = new String(headerBuffer.getData(), 0, headerBuffer.getLength(),
                        Charset.forName("UTF-8"));
            } catch (Exception e) {
                LOG.warn(CCStringUtils.stringifyException(e));
                header = new String(headerBuffer.getData(), 0, headerBuffer.getLength(),
                        Charset.forName("ASCII"));
            }
        }
        final String headersFinal = (header != null) ? header : "";

        LOG.info("Starting Event Loop");
        final EventLoop eventLoop = new EventLoop();
        eventLoop.start();

        try {
            // create fake hosts file ...
            // String hosts = "10.0.20.101:8072";
            // reader
            // Reader reader = new StringReader(hosts);

            // dispatcher init
            LOG.info("initializing Dispatcher");
            final Dispatcher dispatcher = new Dispatcher(eventLoop, "parserNodes");

            LOG.info("Waiting for a few seconds");
            Thread.sleep(5000);

            Thread threads[] = new Thread[TEST_THREAD_COUNT];
            final Semaphore threadWaitSem = new Semaphore(-TEST_THREAD_COUNT - 1);

            // start 100 threads
            for (int threadIdx = 0; threadIdx < TEST_THREAD_COUNT; ++threadIdx) {
                threads[threadIdx] = new Thread(new Runnable() {
                    @Override
                    public void run() {
                        for (int i = 0; i < ITERATIONS_PER_THREAD; ++i) {
                            // build parse request
                            ParseRequest request = new ParseRequest();
                            request.setDocId(1);
                            request.setDomainId(1);
                            request.setDocURL(finalBaseURL.toString());
                            request.setDocHeaders(headersFinal);
                            request.setDocContent(
                                    new FlexBuffer(contentBuffer.getData(), 0, contentBuffer.getLength()));

                            // LOG.info("Dispatching parse request");
                            ParseResult result = dispatcher.dispatchRequest(request);

                            LOG.info("TID[" + Thread.currentThread().getId() + "]ReqID[" + i + "]" + " Success:"
                                    + ((result != null) ? result.getParseSuccessful() : false) + " LinkCount:"
                                    + ((result != null) ? result.getExtractedLinks().size() : 0));
                        }
                        LOG.info("Thread:" + Thread.currentThread().getId() + " Exiting");
                        threadWaitSem.release();
                    }
                });
                threads[threadIdx].start();
            }

            LOG.info("Waiting for threads to die");
            threadWaitSem.acquireUninterruptibly();
            LOG.info("All Threads dead.");
        } finally {
            eventLoop.stop();
        }
    } catch (IOException e) {
        LOG.error(CCStringUtils.stringifyException(e));
    } catch (InterruptedException e) {
    }
}
From source file:org.obiba.mica.config.EsConcurrencyConfiguration.java
@Bean(name = "esJoinQueriesSemaphore")
@Scope(ConfigurableBeanFactory.SCOPE_SINGLETON)
public Semaphore getSemaphore() {
    return new Semaphore(propertyResolver.getProperty("maxConcurrentJoinQueries", Integer.class,
            DEFAULT_MAX_CONCURRENT_MAX_JOIN_QUERIES));
}