List of usage examples for the java.util.concurrent.Semaphore constructor
public Semaphore(int permits)
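The examples below cluster around two uses of this constructor: a semaphore created with zero (or even a negative number of) permits acts as a completion latch that acquire() blocks on until another thread calls release(), while a semaphore created with N permits acts as a bounded pool or throttle. A minimal, self-contained sketch of both patterns (class and variable names here are illustrative, not taken from the examples below):

import java.util.concurrent.Semaphore;

public class SemaphoreConstructorSketch {
    public static void main(String[] args) throws InterruptedException {
        // Pattern 1: zero permits -> completion latch.
        // acquire() blocks until the worker thread calls release().
        final Semaphore done = new Semaphore(0);
        new Thread(() -> {
            // ... do background work ...
            done.release();            // signal completion
        }).start();
        done.acquire();                // wait for the signal

        // Pattern 2: N permits -> throttle allowing at most 3
        // threads into the guarded section at a time.
        final Semaphore throttle = new Semaphore(3);
        Runnable task = () -> {
            try {
                throttle.acquire();    // take a permit (blocks if none free)
                try {
                    // ... rate-limited work ...
                } finally {
                    throttle.release(); // return the permit
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        };
        for (int i = 0; i < 10; i++) {
            new Thread(task).start();
        }
    }
}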
From source file:org.telegram.ui.ProfileActivity.java
@Override
public boolean onFragmentCreate() {
    user_id = arguments.getInt("user_id", 0);
    chat_id = getArguments().getInt("chat_id", 0);
    if (user_id != 0) {
        dialog_id = arguments.getLong("dialog_id", 0);
        if (dialog_id != 0) {
            currentEncryptedChat = MessagesController.getInstance().getEncryptedChat((int) (dialog_id >> 32));
        }
        TLRPC.User user = MessagesController.getInstance().getUser(user_id);
        if (user == null) {
            return false;
        }
        NotificationCenter.getInstance().addObserver(this, NotificationCenter.updateInterfaces);
        NotificationCenter.getInstance().addObserver(this, NotificationCenter.contactsDidLoaded);
        NotificationCenter.getInstance().addObserver(this, NotificationCenter.encryptedChatCreated);
        NotificationCenter.getInstance().addObserver(this, NotificationCenter.encryptedChatUpdated);
        NotificationCenter.getInstance().addObserver(this, NotificationCenter.blockedUsersDidLoaded);
        NotificationCenter.getInstance().addObserver(this, NotificationCenter.botInfoDidLoaded);
        NotificationCenter.getInstance().addObserver(this, NotificationCenter.userInfoDidLoaded);
        if (currentEncryptedChat != null) {
            NotificationCenter.getInstance().addObserver(this, NotificationCenter.didReceivedNewMessages);
        }
        userBlocked = MessagesController.getInstance().blockedUsers.contains(user_id);
        if (user.bot) {
            BotQuery.loadBotInfo(user.id, true, classGuid);
        }
        MessagesController.getInstance().loadFullUser(MessagesController.getInstance().getUser(user_id), classGuid, true);
        participantsMap = null;
    } else if (chat_id != 0) {
        currentChat = MessagesController.getInstance().getChat(chat_id);
        if (currentChat == null) {
            final Semaphore semaphore = new Semaphore(0);
            MessagesStorage.getInstance().getStorageQueue().postRunnable(new Runnable() {
                @Override
                public void run() {
                    currentChat = MessagesStorage.getInstance().getChat(chat_id);
                    semaphore.release();
                }
            });
            try {
                semaphore.acquire();
            } catch (Exception e) {
                FileLog.e("tmessages", e);
            }
            if (currentChat != null) {
                MessagesController.getInstance().putChat(currentChat, true);
            } else {
                return false;
            }
        }
        if (currentChat.megagroup) {
            getChannelParticipants(true);
        } else {
            participantsMap = null;
        }
        NotificationCenter.getInstance().addObserver(this, NotificationCenter.chatInfoDidLoaded);
        sortedUsers = new ArrayList<>();
        updateOnlineCount();
        avatarUpdater = new AvatarUpdater();
        avatarUpdater.delegate = new AvatarUpdater.AvatarUpdaterDelegate() {
            @Override
            public void didUploadedPhoto(TLRPC.InputFile file, TLRPC.PhotoSize small, TLRPC.PhotoSize big) {
                if (chat_id != 0) {
                    MessagesController.getInstance().changeChatAvatar(chat_id, file);
                }
            }
        };
        avatarUpdater.parentFragment = this;
        if (ChatObject.isChannel(currentChat)) {
            MessagesController.getInstance().loadFullChat(chat_id, classGuid, true);
        }
    } else {
        return false;
    }
    if (dialog_id != 0) {
        SharedMediaQuery.getMediaCount(dialog_id, SharedMediaQuery.MEDIA_PHOTOVIDEO, classGuid, true);
    } else if (user_id != 0) {
        SharedMediaQuery.getMediaCount(user_id, SharedMediaQuery.MEDIA_PHOTOVIDEO, classGuid, true);
    } else if (chat_id > 0) {
        SharedMediaQuery.getMediaCount(-chat_id, SharedMediaQuery.MEDIA_PHOTOVIDEO, classGuid, true);
        if (mergeDialogId != 0) {
            SharedMediaQuery.getMediaCount(mergeDialogId, SharedMediaQuery.MEDIA_PHOTOVIDEO, classGuid, true);
        }
    }
    NotificationCenter.getInstance().addObserver(this, NotificationCenter.mediaCountDidLoaded);
    NotificationCenter.getInstance().addObserver(this, NotificationCenter.updateInterfaces);
    NotificationCenter.getInstance().addObserver(this, NotificationCenter.closeChats);
    updateRowsIds();
    return true;
}
From source file:com.arpnetworking.metrics.impl.ApacheHttpSinkTest.java
@Test
public void testBatchesRequests() throws InterruptedException {
    _wireMockRule.stubFor(WireMock.requestMatching(new RequestValueMatcher(r -> {
        // Annotations
        Assert.assertEquals(0, r.getAnnotationsCount());
        // Dimensions
        Assert.assertEquals(0, r.getDimensionsCount());
        // Samples
        assertSample(r.getTimersList(), "timer", 7d);
        assertSample(r.getCountersList(), "counter", 8d);
        assertSample(r.getGaugesList(), "gauge", 9d);
    })).willReturn(WireMock.aResponse().withStatus(200)));

    final AtomicBoolean assertionResult = new AtomicBoolean(false);
    final Semaphore semaphore = new Semaphore(0);
    final Sink sink = new ApacheHttpSink.Builder()
            .setUri(URI.create("http://localhost:" + _wireMockRule.port() + PATH))
            .setMaxBatchSize(10)
            .setParallelism(1)
            .setEmptyQueueInterval(Duration.ofMillis(1000))
            .setEventHandler(new AttemptCompletedAssertionHandler(assertionResult, 3, 210, true,
                    new CompletionHandler(semaphore)))
            .build();

    final TsdEvent event = new TsdEvent(Collections.emptyMap(),
            createQuantityMap("timer", TsdQuantity.newInstance(7d, null)),
            createQuantityMap("counter", TsdQuantity.newInstance(8d, null)),
            createQuantityMap("gauge", TsdQuantity.newInstance(9d, null)));

    for (int x = 0; x < 3; x++) {
        sink.record(event);
    }
    semaphore.acquire();

    // Ensure expected handler was invoked
    Assert.assertTrue(assertionResult.get());

    // Request matcher
    final RequestPatternBuilder requestPattern = WireMock.postRequestedFor(WireMock.urlEqualTo(PATH))
            .withHeader("Content-Type", WireMock.equalTo("application/octet-stream"));

    // Assert that data was sent
    _wireMockRule.verify(1, requestPattern);
    Assert.assertTrue(_wireMockRule.findUnmatchedRequests().getRequests().isEmpty());
}
From source file:jp.primecloud.auto.puppet.PuppetClient.java
/**
 * Sets the number of run permits. A null value disables the run semaphore.
 *
 * @param runPermits runPermits
 */
public void setRunPermits(Integer runPermits) {
    this.runSemaphore = runPermits == null ? null : new Semaphore(runPermits.intValue());
}
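The setter above only constructs the semaphore; how PuppetClient consumes runSemaphore is not shown in this excerpt. A hedged sketch of the throttling pattern such a field typically supports — the class name, runAgent() and doRun() are hypothetical, only runSemaphore/setRunPermits come from the example:

import java.util.concurrent.Semaphore;

public class ThrottledRunner {

    private Semaphore runSemaphore;   // null means "no throttling"

    public void setRunPermits(Integer runPermits) {
        this.runSemaphore = runPermits == null ? null : new Semaphore(runPermits.intValue());
    }

    public void runAgent(String host) throws InterruptedException {
        if (runSemaphore == null) {
            doRun(host);              // throttling disabled
            return;
        }
        runSemaphore.acquire();       // wait for a free run slot
        try {
            doRun(host);              // at most runPermits concurrent runs
        } finally {
            runSemaphore.release();
        }
    }

    private void doRun(String host) {
        // the real class would trigger an agent run here (details omitted)
        System.out.println("running agent on " + host);
    }
}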
From source file:com.alibaba.napoli.client.benchmark.NapoliNormalQueueTest.java
@Test
public void sendMessageWithSenderStoreDisabledTest() throws Exception {
    log.info("start to execute sendMessageWithSenderStoreDisabledTest");
    long beginQueueSize = JmxUtil.getQueueSize(sendConnector.getAddress(), queueName);
    qSender = new DefaultAsyncSender();
    qSender.setConnector(sendConnector);
    qSender.setName(queueName);
    qSender.setStoreEnable(false);
    qSender.setReprocessInterval(10000 * 1000 * 1000);
    qSender.init();

    int tc = 10;
    log.info("yanny requestcount = " + System.getProperty("requestCount"));
    final int tp = Integer.parseInt(System.getProperty("requestCount", "20"));

    final Semaphore semaphore = new Semaphore(tc);
    final AtomicInteger sumCount = new AtomicInteger();
    final AtomicInteger requestCount = new AtomicInteger();

    long startTime = System.currentTimeMillis();
    log.info("Yanny start send request " + startTime);

    for (int i = 0; i < tc; i++) {
        Thread t = new Thread("thread--" + i) {
            public void run() {
                try {
                    // the message payload must be Serializable
                    semaphore.acquire();
                    Person person = new Person();
                    person.setLoginName("superman");
                    person.setEmail("sm@1.com");
                    person.setPenName("pname");
                    person.setStatus(PersonStatus.ENABLED);
                    for (int j = 0; j < tp; j++) {
                        // log.info("hello");
                        int id = requestCount.incrementAndGet();
                        person.setPersonId("" + id);
                        // send() returns true on success, false on failure
                        boolean result = qSender.send(person);
                        if (!result) {
                            log.info("----------------send to queue " + "result is false. personid=" + j);
                        } else {
                            sumCount.incrementAndGet();
                        }
                    }
                } catch (Throwable t) {
                    t.printStackTrace();
                } finally {
                    semaphore.release();
                }
            }
        };
        t.start();
    }

    while (semaphore.availablePermits() != tc) {
        Thread.sleep(100);
    }

    int totalRequest = tc * tp;
    long endTime = System.currentTimeMillis();
    log.info("yanny: send " + totalRequest + " message, take " + (endTime - startTime) + " milseconds");

    JmxUtil.waitTillQueueSizeAsTarget(sendConnector.getAddress(), queueName, beginQueueSize);
    endTime = System.currentTimeMillis();

    String errorMessage = "";
    long qBdbCount = NapoliTestUtil.getStoreSize(sendConnector.getSenderKVStore(qSender.getName()));
    log.info("yanny totalRequest " + totalRequest + " send queue success " + sumCount + " local store count:" + qBdbCount
            + " queue received " + qWorker.getAccessNum() + " take " + (endTime - startTime) + " milseconds");
    log.info(initConsumeMessage);
    log.info("NapoliNormalQueueTest's success=" + qWorker.getAccessNum() + " bdb's size=" + qBdbCount);

    if (qBdbCount > 0) {
        errorMessage += ";with store disabled, local store count should be empty, but is " + qBdbCount;
    }

    // with store enabled, all send should succeed.
    if (qSender.getStoreEnable()) {
        if (sumCount.get() != totalRequest) {
            errorMessage += ";with store enabled, all send should return success, but not equal now. send succeed "
                    + sumCount.get() + "; total request:" + totalRequest;
        }
    } else {
        if (sumCount.get() < totalRequest * 0.95) {
            errorMessage += ";with store disabled, expected more than 95% message send succeed, total request:"
                    + totalRequest + "; send succeed " + sumCount.get();
        }
    }

    // the queue should not receive more messages than totalRequest
    if (totalRequest < qWorker.getAccessNum()) {
        errorMessage += ";queue should not have success messages more than send succeed" + sumCount.get()
                + " (success " + qWorker.getAccessNum() + ")";
    }

    // account for locally stored messages (qBdbCount)
    if ((sumCount.get() - qBdbCount) > qWorker.getAccessNum()) {
        errorMessage += ";queue received message (" + qWorker.getAccessNum()
                + ") less than send succeed - local stored message, message lost " + (sumCount.get() - qBdbCount);
    }

    int allowedDiff = (int) Math.round(sumCount.get() * 0.001);
    if (((qWorker.getAccessNum() + qBdbCount) - sumCount.get()) > allowedDiff) {
        errorMessage += "queue received message should not have more than send succeed + " + allowedDiff
                + " than allowed (0.1%), gap " + ((qWorker.getAccessNum() + qBdbCount) - sumCount.get());
    }

    assertTrue(errorMessage, errorMessage.equals(""));

    verify(napoliSenderStat, atMost(qWorker.getAccessNum())).sendSuccess(anyLong(), anyLong());
    verify(napoliSenderStat, atLeast((int) (sumCount.get() - qBdbCount))).sendSuccess(anyLong(), anyLong());
    verify(napoliSenderStat, times(totalRequest - sumCount.get())).sendFalse(anyLong(), anyLong());
    verify(napoliSenderStat, times((int) qBdbCount)).sendFailure(anyLong(), anyLong());
    verify(napoliReceiverStat, times((int) qWorker.getAccessNum())).receiveSuccess(anyLong(), anyLong());
}
From source file:org.commoncrawl.service.crawler.CrawlSegmentLog.java
/** sync the incoming segment against the local crawl log and then send it up to the history server **/
public int syncToLog(CrawlSegmentFPMap segmentDetail) throws IOException {
    if (Environment.detailLogEnabled())
        LOG.info("### SYNC: List:" + _listId + " Segment:" + _segmentId + " Syncing Progress Log");

    int itemsProcessed = 0;

    // and construct a path to the local crawl segment directory ...
    File activeLogPath = buildActivePath(_rootDataDir, _listId, _segmentId);
    File checkpointLogPath = buildCheckpointPath(_rootDataDir, _listId, _segmentId);

    // check if it exists ...
    if (checkpointLogPath.exists()) {
        // log it ...
        if (Environment.detailLogEnabled())
            LOG.info("### SYNC: List:" + _listId + " Segment:" + _segmentId + " Checkpoint Log Found");
        // rename it as the active log ...
        checkpointLogPath.renameTo(activeLogPath);
    }

    if (activeLogPath.exists()) {
        // reconcile against active log (if it exists) ...
        _localLogItemCount = reconcileLogFile(FileSystem.getLocal(CrawlEnvironment.getHadoopConfig()),
                new Path(activeLogPath.getAbsolutePath()), _listId, _segmentId, segmentDetail, null);

        if (Environment.detailLogEnabled())
            LOG.info("### SYNC: List:" + _listId + " Segment:" + _segmentId
                    + " Reconciled Local Log File with ProcessedItemCount:" + _localLogItemCount);

        itemsProcessed += _localLogItemCount;
    }

    FileSystem hdfs = CrawlEnvironment.getDefaultFileSystem();

    // first things first ... check to see if special completion log file exists in hdfs
    Path hdfsSegmentCompletionLogPath = new Path(CrawlEnvironment.getCrawlSegmentDataDirectory() + "/" + getListId()
            + "/" + getSegmentId() + "/" + CrawlEnvironment.buildCrawlSegmentCompletionLogFileName(getNodeName()));

    if (hdfs.exists(hdfsSegmentCompletionLogPath)) {
        if (Environment.detailLogEnabled())
            LOG.info("### SYNC: List:" + _listId + " Segment:" + _segmentId
                    + " Completion File Found. Marking Segment Complete");
        // if the file exists then this segment has been crawled and uploaded already ...
        // if active log file exists ... delete it ...
        if (activeLogPath.exists())
            activeLogPath.delete();
        // reset local log item count ...
        _localLogItemCount = 0;
        itemsProcessed = -1;

        // remove all hosts from segment
        segmentDetail._urlsComplete = segmentDetail._urlCount;
    } else {
        if (segmentDetail != null) {
            if (Environment.detailLogEnabled())
                LOG.info("### SYNC: Building BulkItem History Query for List:" + _listId + " Segment:" + _segmentId);

            BulkItemHistoryQuery query = buildHistoryQueryBufferFromMap(segmentDetail);

            if (query != null) {
                // create blocking semaphore ...
                final Semaphore semaphore = new Semaphore(1);
                semaphore.acquireUninterruptibly();

                if (Environment.detailLogEnabled())
                    LOG.info("### SYNC: Dispatching query to history server");

                // create an outer response object we can pass the async response to ...
                final BulkItemHistoryQueryResponse outerResponse = new BulkItemHistoryQueryResponse();

                CrawlerServer.getServer().getHistoryServiceStub().bulkItemQuery(query,
                        new Callback<BulkItemHistoryQuery, BulkItemHistoryQueryResponse>() {

                            @Override
                            public void requestComplete(
                                    final AsyncRequest<BulkItemHistoryQuery, BulkItemHistoryQueryResponse> request) {
                                // response returns in async thread context ...
                                if (request.getStatus() == Status.Success) {
                                    if (Environment.detailLogEnabled())
                                        LOG.info("###SYNC: bulk Query to history server succeeded. setting out response");
                                    ImmutableBuffer buffer = request.getOutput().getResponseList();
                                    outerResponse.setResponseList(
                                            new Buffer(buffer.getReadOnlyBytes(), 0, buffer.getCount()));
                                } else {
                                    LOG.error("###SYNC: bulk Query to history server failed.");
                                }
                                // release semaphore
                                semaphore.release();
                            }
                        });

                LOG.info("###SYNC: Loader thread blocked waiting for bulk query response");
                semaphore.acquireUninterruptibly();
                LOG.info("###SYNC: Loader thread received response from history server");

                if (outerResponse.getResponseList().getCount() == 0) {
                    LOG.error("###SYNC: History Server Bulk Query Returned NULL!!! for List:" + _listId
                            + " Segment:" + _segmentId);
                } else {
                    // ok time to process the response and integrate the results into the fp list
                    updateFPMapFromBulkQueryResponse(segmentDetail, outerResponse);
                }
            } else {
                if (Environment.detailLogEnabled())
                    LOG.warn("### SYNC: No fingerprints found when processing segment detail for List:" + _listId
                            + " Segment:" + _segmentId);
                segmentDetail._urlsComplete = segmentDetail._urlCount;
            }
        }
        /*
        // and now walk hdfs looking for any checkpointed logs ...
        // scan based on checkpoint filename ...
        FileStatus[] remoteCheckpointFiles = hdfs.globStatus(new Path(CrawlEnvironment.getCrawlSegmentDataDirectory()
                + "/" + getListId() + "/" + getSegmentId() + "/"
                + CrawlEnvironment.buildCrawlSegmentLogCheckpointWildcardString(getNodeName())));

        if (remoteCheckpointFiles != null) {
            LOG.info("### SYNC: List:" + _listId + " Segment:" + _segmentId + " Found Remote Checkpoint Files");

            // create a temp file to hold the reconciled log ...
            File consolidatedLogFile = null;

            if (remoteCheckpointFiles.length > 1) {
                // create temp log file ...
                consolidatedLogFile = File.createTempFile("SegmentLog", Long.toString(System.currentTimeMillis()));
                // write out header ...
                CrawlSegmentLog.writeHeader(consolidatedLogFile, 0);
            }
            // walk the files
            for (FileStatus checkpointFilePath : remoteCheckpointFiles) {
                // and reconcile them against segment ...
                itemsProcessed += reconcileLogFile(hdfs, checkpointFilePath.getPath(), getListId(), getSegmentId(),
                        segmentDetail, consolidatedLogFile);
                LOG.info("### SYNC: List:" + _listId + " Segment:" + _segmentId + " Processed Checkpoint File:"
                        + checkpointFilePath.getPath() + " Items Processed:" + itemsProcessed);
            }

            // finally ... if consolidatedLogFile is not null
            if (consolidatedLogFile != null) {
                // build a new hdfs file name ...
                Path consolidatedHDFSPath = new Path(CrawlEnvironment.getCrawlSegmentDataDirectory() + "/" + getListId()
                        + "/" + getSegmentId() + "/"
                        + CrawlEnvironment.buildCrawlSegmentLogCheckpointFileName(getNodeName(), System.currentTimeMillis()));

                LOG.info("### SYNC: List:" + _listId + " Segment:" + _segmentId + " Writing Consolidated Log File:"
                        + consolidatedHDFSPath + " to HDFS");

                // and copy local file to log ...
                hdfs.copyFromLocalFile(new Path(consolidatedLogFile.getAbsolutePath()), consolidatedHDFSPath);

                // and delete all previous log file entries ...
                for (FileStatus oldCheckPointFile : remoteCheckpointFiles) {
                    hdfs.delete(oldCheckPointFile.getPath());
                }

                consolidatedLogFile.delete();
            }
        }
        */
    }

    if (segmentDetail != null) {
        _remainingURLS += (segmentDetail._urlCount - segmentDetail._urlsComplete);
        // mark url count as valid now ...
        _urlCountValid = true;

        // now if remaining url count is zero ... then mark the segment as complete ...
        if (_remainingURLS == 0 && _localLogItemCount == 0) {
            _segmentComplete = true;
        }
    }

    if (Environment.detailLogEnabled())
        LOG.info("### SYNC: List:" + _listId + " Segment:" + _segmentId + " Done Syncing Progress Log TotalURLS:"
                + segmentDetail._urlCount + " RemainingURLS:" + _remainingURLS + " LocalLogItemCount:"
                + _localLogItemCount);

    return itemsProcessed;
}
From source file:com.impetus.ankush2.ganglia.GangliaDeployer.java
@Override
public boolean unregister(final ClusterConfig conf) {
    try {
        if (String.valueOf(advanceConf.get(Constant.Keys.REGISTER_LEVEL))
                .equalsIgnoreCase(Constant.RegisterLevel.LEVEL1.toString())) {
            return true;
        }
        if (!initializeDataMembers(conf)) {
            return false;
        }
        final String infoMsg = "Unregistering " + getComponentName() + "...";
        logger.info(infoMsg, getComponentName());

        // Getting node map for cluster deployment
        Map<String, Map<String, Object>> nodeMap = new HashMap<String, Map<String, Object>>(compConfig.getNodes());

        // Node Registration process ...
        final Semaphore semaphore = new Semaphore(nodeMap.size());
        for (final String host : nodeMap.keySet()) {
            semaphore.acquire();
            AppStoreWrapper.getExecutor().execute(new Runnable() {
                @Override
                public void run() {
                    conf.getNodes().get(host).setStatus(unregisterNode(host));
                    if (semaphore != null) {
                        semaphore.release();
                    }
                }
            });
        }
        semaphore.acquire(nodeMap.size());

        // Return false if any of the node is not deployed.
        return AnkushUtils.getStatus(conf.getNodes());
    } catch (Exception e) {
        logger.error(e.getMessage());
        return false;
    }
}
From source file:com.parse.ParsePushTest.java
@Test
public void testUnsubscribeInBackgroundWithCallbackSuccess() throws Exception {
    final ParsePushChannelsController controller = mock(ParsePushChannelsController.class);
    when(controller.unsubscribeInBackground(anyString())).thenReturn(Task.<Void>forResult(null));
    ParseCorePlugins.getInstance().registerPushChannelsController(controller);

    ParsePush push = new ParsePush();
    final Semaphore done = new Semaphore(0);
    final Capture<Exception> exceptionCapture = new Capture<>();
    push.unsubscribeInBackground("test", new SaveCallback() {
        @Override
        public void done(ParseException e) {
            exceptionCapture.set(e);
            done.release();
        }
    });

    assertNull(exceptionCapture.get());
    assertTrue(done.tryAcquire(1, 10, TimeUnit.SECONDS));
    verify(controller, times(1)).unsubscribeInBackground("test");
}
From source file:com.impetus.ankush2.cassandra.deployer.CassandraDeployer.java
@Override
public boolean unregister(final ClusterConfig conf) {
    try {
        ComponentConfig compConfig = clusterConfig.getComponents().get(this.componentName);
        if (!AnkushUtils.isMonitoredByAnkush(compConfig)) {
            logger.info("Skipping " + getComponentName() + " unregistration for Level1", this.componentName);
            return true;
        }
        if (!setClassVariables(conf)) {
            return false;
        }
        final String infoMsg = "Unregistering Cassandra";
        logger.info(infoMsg, getComponentName());

        // Getting node map for cluster deployment
        Map<String, Map<String, Object>> nodeMap = new HashMap<String, Map<String, Object>>(componentConfig.getNodes());

        // Node Registration process ...
        final Semaphore semaphore = new Semaphore(nodeMap.size());
        for (final String host : nodeMap.keySet()) {
            semaphore.acquire();
            AppStoreWrapper.getExecutor().execute(new Runnable() {
                @Override
                public void run() {
                    conf.getNodes().get(host).setStatus(unregisterNode(host));
                    if (semaphore != null) {
                        semaphore.release();
                    }
                }
            });
        }
        semaphore.acquire(nodeMap.size());

        // Return false if any of the node is not deployed.
        return AnkushUtils.getStatus(conf.getNodes());
    } catch (AnkushException e) {
        return addClusterError(e.getMessage(), e);
    } catch (Exception e) {
        return addClusterError("Could not unregister " + getComponentName(), e);
    }
}
From source file:com.google.acre.script.NHttpClient.java
public NHttpClient(int max_connections) {
    _max_connections = max_connections;
    _costCollector = CostCollector.getInstance();

    BasicHttpProcessor httpproc = new BasicHttpProcessor();
    httpproc.addInterceptor(new RequestContent());
    httpproc.addInterceptor(new RequestTargetHost());
    httpproc.addInterceptor(new RequestConnControl());
    httpproc.addInterceptor(new RequestUserAgent());
    httpproc.addInterceptor(new RequestExpectContinue());

    BufferingHttpClientHandler handler = new BufferingHttpClientHandler(httpproc,
            new NHttpRequestExecutionHandler(), new DefaultConnectionReuseStrategy(), DEFAULT_HTTP_PARAMS);

    handler.setEventListener(new EventListener() {

        private final static String REQUEST_CLOSURE = "request-closure";

        public void connectionClosed(NHttpConnection conn) {
            // pass (should we be logging this?)
        }

        public void connectionOpen(NHttpConnection conn) {
            // pass (should we be logging this?)
        }

        public void connectionTimeout(NHttpConnection conn) {
            noteException(null, conn);
        }

        void noteException(Exception e, NHttpConnection conn) {
            HttpContext context = conn.getContext();
            NHttpClientClosure closure = (NHttpClientClosure) context.getAttribute(REQUEST_CLOSURE);
            if (closure != null)
                closure.exceptions().add(e);
        }

        public void fatalIOException(IOException e, NHttpConnection conn) {
            noteException(e, conn);
        }

        public void fatalProtocolException(HttpException e, NHttpConnection conn) {
            noteException(e, conn);
        }
    });

    try {
        SSLContext sctx = SSLContext.getInstance("SSL");
        sctx.init(null, null, null);
        _dispatch = new NHttpAdaptableSensibleAndLogicalIOEventDispatch(handler, sctx, DEFAULT_HTTP_PARAMS);
    } catch (java.security.KeyManagementException e) {
        throw new RuntimeException(e);
    } catch (java.security.NoSuchAlgorithmException e) {
        throw new RuntimeException(e);
    }

    _requests = new ArrayList<NHttpClientClosure>();
    _connection_lock = new Semaphore(_max_connections);
}
From source file:com.arpnetworking.metrics.impl.ApacheHttpSinkTest.java
@Test
public void testBatchesRequestsRespectsMax() throws InterruptedException {
    _wireMockRule.stubFor(WireMock.requestMatching(new RequestValueMatcher(r -> {
        // Annotations
        Assert.assertEquals(0, r.getAnnotationsCount());
        // Dimensions
        Assert.assertEquals(0, r.getDimensionsCount());
        // Samples
        assertSample(r.getTimersList(), "timer", 7d);
        assertSample(r.getCountersList(), "counter", 8d);
        assertSample(r.getGaugesList(), "gauge", 9d);
    })).willReturn(WireMock.aResponse().withStatus(200)));

    final Semaphore semaphore = new Semaphore(-2);
    final Sink sink = new ApacheHttpSink.Builder()
            .setUri(URI.create("http://localhost:" + _wireMockRule.port() + PATH))
            .setMaxBatchSize(2)
            .setParallelism(1)
            .setEmptyQueueInterval(Duration.ofMillis(1000))
            .setEventHandler(new CompletionHandler(semaphore))
            .build();

    final TsdEvent event = new TsdEvent(Collections.emptyMap(),
            createQuantityMap("timer", TsdQuantity.newInstance(7d, null)),
            createQuantityMap("counter", TsdQuantity.newInstance(8d, null)),
            createQuantityMap("gauge", TsdQuantity.newInstance(9d, null)));

    for (int x = 0; x < 5; x++) {
        sink.record(event);
    }
    semaphore.acquire();

    // Request matcher
    final RequestPatternBuilder requestPattern = WireMock.postRequestedFor(WireMock.urlEqualTo(PATH))
            .withHeader("Content-Type", WireMock.equalTo("application/octet-stream"));

    // Assert that data was sent
    _wireMockRule.verify(3, requestPattern);
    Assert.assertTrue(_wireMockRule.findUnmatchedRequests().getRequests().isEmpty());
}
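Note the constructor argument in the last example: Semaphore(int permits) accepts a negative initial value, in which case releases must occur before any acquire can succeed. The test above relies on this so that a single acquire() only returns after the completion handler has reported three batches. A minimal sketch of the same idea, waiting for three worker threads with one acquire() (names are illustrative):

import java.util.concurrent.Semaphore;

public class NegativePermitsSketch {
    public static void main(String[] args) throws InterruptedException {
        // Start at -2: three release() calls are needed before one acquire() can succeed.
        final Semaphore allDone = new Semaphore(-2);
        for (int i = 0; i < 3; i++) {
            final int id = i;
            new Thread(() -> {
                System.out.println("worker " + id + " finished");
                allDone.release();     // each completion adds one permit
            }).start();
        }
        allDone.acquire();             // returns only after all three releases
        System.out.println("all workers finished");
    }
}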