List of usage examples for java.util.concurrent.CyclicBarrier: the CyclicBarrier(int parties) constructor
public CyclicBarrier(int parties)
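This constructor creates a barrier that trips once the given number of parties (threads) have called await(); no barrier action runs when it trips. Before the real-world examples below, here is a minimal, self-contained sketch of the typical pattern: start several threads and release them together once all have reached the barrier. The class name, the WORKERS constant, and the Runnable bodies are illustrative only and do not come from any of the projects listed below.

import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CyclicBarrier;

public class CyclicBarrierSketch {

    // Hypothetical worker count chosen for the example only.
    private static final int WORKERS = 3;

    public static void main(String[] args) throws InterruptedException {
        // All WORKERS threads block in await() until the last one arrives,
        // then every thread is released at the same time.
        CyclicBarrier barrier = new CyclicBarrier(WORKERS);

        Thread[] threads = new Thread[WORKERS];
        for (int i = 0; i < WORKERS; i++) {
            final int id = i;
            threads[i] = new Thread(() -> {
                try {
                    System.out.println("worker " + id + " waiting at barrier");
                    barrier.await();   // blocks until all parties arrive
                    System.out.println("worker " + id + " released");
                } catch (InterruptedException | BrokenBarrierException e) {
                    Thread.currentThread().interrupt();
                }
            });
            threads[i].start();
        }
        for (Thread t : threads) {
            t.join();
        }
    }
}

Several of the tests below use the same idea with only two parties (the test thread plus one worker) so the test can pause a worker at a known point and then release it.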
From source file:org.apache.hadoop.contrib.bkjournal.TestBookKeeperJournalManager.java
/**
 * Tests that concurrent calls to format will still allow one to succeed.
 */
@Test
public void testConcurrentFormat() throws Exception {
    final URI uri = BKJMUtil.createJournalURI("/hdfsjournal-concurrentformat");
    final NamespaceInfo nsi = newNSInfo();

    // populate with data first
    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, uri, nsi);
    bkjm.format(nsi);
    for (int i = 1; i < 100 * 2; i += 2) {
        bkjm.startLogSegment(i, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
        bkjm.finalizeLogSegment(i, i + 1);
    }
    bkjm.close();

    final int numThreads = 40;
    List<Callable<ThreadStatus>> threads = new ArrayList<Callable<ThreadStatus>>();
    final CyclicBarrier barrier = new CyclicBarrier(numThreads);

    for (int i = 0; i < numThreads; i++) {
        threads.add(new Callable<ThreadStatus>() {
            public ThreadStatus call() {
                BookKeeperJournalManager bkjm = null;
                try {
                    bkjm = new BookKeeperJournalManager(conf, uri, nsi);
                    barrier.await();
                    bkjm.format(nsi);
                    return ThreadStatus.COMPLETED;
                } catch (IOException ioe) {
                    LOG.info("Exception formatting ", ioe);
                    return ThreadStatus.GOODEXCEPTION;
                } catch (InterruptedException ie) {
                    LOG.error("Interrupted. Something is broken", ie);
                    Thread.currentThread().interrupt();
                    return ThreadStatus.BADEXCEPTION;
                } catch (Exception e) {
                    LOG.error("Some other bad exception", e);
                    return ThreadStatus.BADEXCEPTION;
                } finally {
                    if (bkjm != null) {
                        try {
                            bkjm.close();
                        } catch (IOException ioe) {
                            LOG.error("Error closing journal manager", ioe);
                        }
                    }
                }
            }
        });
    }

    ExecutorService service = Executors.newFixedThreadPool(numThreads);
    List<Future<ThreadStatus>> statuses = service.invokeAll(threads, 60, TimeUnit.SECONDS);
    int numCompleted = 0;
    for (Future<ThreadStatus> s : statuses) {
        assertTrue(s.isDone());
        assertTrue("Thread threw invalid exception",
                s.get() == ThreadStatus.COMPLETED || s.get() == ThreadStatus.GOODEXCEPTION);
        if (s.get() == ThreadStatus.COMPLETED) {
            numCompleted++;
        }
    }
    LOG.info("Completed " + numCompleted + " formats");
    assertTrue("No thread managed to complete formatting", numCompleted > 0);
}
From source file:org.apache.hadoop.yarn.server.resourcemanager.security.TestDelegationTokenRenewer.java
@Test(timeout = 20000)
public void testConcurrentAddApplication()
        throws IOException, InterruptedException, BrokenBarrierException {
    final CyclicBarrier startBarrier = new CyclicBarrier(2);
    final CyclicBarrier endBarrier = new CyclicBarrier(2);

    // this token uses barriers to block during renew
    final Credentials creds1 = new Credentials();
    final Token<DelegationTokenIdentifier> token1 = mock(Token.class);
    when(token1.getKind()).thenReturn(KIND);
    DelegationTokenIdentifier dtId1 =
            new DelegationTokenIdentifier(new Text("user1"), new Text("renewer"), new Text("user1"));
    when(token1.decodeIdentifier()).thenReturn(dtId1);
    creds1.addToken(new Text("token"), token1);
    doReturn(true).when(token1).isManaged();
    doAnswer(new Answer<Long>() {
        public Long answer(InvocationOnMock invocation)
                throws InterruptedException, BrokenBarrierException {
            startBarrier.await();
            endBarrier.await();
            return Long.MAX_VALUE;
        }
    }).when(token1).renew(any(Configuration.class));

    // this dummy token fakes renewing
    final Credentials creds2 = new Credentials();
    final Token<DelegationTokenIdentifier> token2 = mock(Token.class);
    when(token2.getKind()).thenReturn(KIND);
    when(token2.decodeIdentifier()).thenReturn(dtId1);
    creds2.addToken(new Text("token"), token2);
    doReturn(true).when(token2).isManaged();
    doReturn(Long.MAX_VALUE).when(token2).renew(any(Configuration.class));

    // fire up the renewer
    final DelegationTokenRenewer dtr = createNewDelegationTokenRenewer(conf, counter);
    RMContext mockContext = mock(RMContext.class);
    when(mockContext.getSystemCredentialsForApps())
            .thenReturn(new ConcurrentHashMap<ApplicationId, ByteBuffer>());
    ClientRMService mockClientRMService = mock(ClientRMService.class);
    when(mockContext.getClientRMService()).thenReturn(mockClientRMService);
    InetSocketAddress sockAddr = InetSocketAddress.createUnresolved("localhost", 1234);
    when(mockClientRMService.getBindAddress()).thenReturn(sockAddr);
    dtr.setRMContext(mockContext);
    when(mockContext.getDelegationTokenRenewer()).thenReturn(dtr);
    dtr.init(conf);
    dtr.start();

    // submit a job that blocks during renewal
    Thread submitThread = new Thread() {
        @Override
        public void run() {
            dtr.addApplicationAsync(mock(ApplicationId.class), creds1, false, "user");
        }
    };
    submitThread.start();

    // wait till 1st submit blocks, then submit another
    startBarrier.await();
    dtr.addApplicationAsync(mock(ApplicationId.class), creds2, false, "user");

    // signal 1st to complete
    endBarrier.await();
    submitThread.join();
}
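The renew-blocking trick above generalizes: a test thread and a worker share two 2-party barriers, so the test can hold the worker at a known point, perform a concurrent action, and then let the worker finish. A minimal, dependency-free sketch of that pattern follows; the class name, method bodies, and printed messages are illustrative only, not taken from the Hadoop test.

import java.util.concurrent.CyclicBarrier;

public class StartEndBarrierSketch {

    public static void main(String[] args) throws Exception {
        // Two parties each: the worker thread and the main (test) thread.
        final CyclicBarrier startBarrier = new CyclicBarrier(2);
        final CyclicBarrier endBarrier = new CyclicBarrier(2);

        Thread worker = new Thread(() -> {
            try {
                startBarrier.await();   // signal "I have reached the critical point"
                // ... the operation under test would be blocked here ...
                endBarrier.await();     // wait for the test to allow completion
            } catch (Exception e) {
                Thread.currentThread().interrupt();
            }
        });
        worker.start();

        startBarrier.await();           // wait until the worker is parked at the known point
        System.out.println("worker is parked; run the concurrent action here");
        endBarrier.await();             // release the worker
        worker.join();
    }
}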
From source file:com.unboundid.scim.tools.SCIMQueryRate.java
/**
 * Performs the actual processing for this tool.  In this case, it gets a
 * connection to the directory server and uses it to perform the requested
 * searches.
 *
 * @return  The result code for the processing that was performed.
 */
@Override()
public ResultCode doToolProcessing() {
    // Initialize the Debugger
    Debug.setEnabled(true);
    Debug.getLogger().addHandler(new ConsoleHandler());
    Debug.getLogger().setUseParentHandlers(false);

    // Determine the random seed to use.
    final Long seed;
    if (randomSeed.isPresent()) {
        seed = Long.valueOf(randomSeed.getValue());
    } else {
        seed = null;
    }

    // Create a value pattern for the filter.
    final ValuePattern filterPattern;
    boolean isQuery = true;
    if (filter.isPresent()) {
        try {
            filterPattern = new ValuePattern(filter.getValue(), seed);
        } catch (ParseException pe) {
            Debug.debugException(pe);
            err(ERR_QUERY_TOOL_BAD_FILTER_PATTERN.get(pe.getMessage()));
            return ResultCode.PARAM_ERROR;
        }
    } else if (resourceId.isPresent()) {
        isQuery = false;
        try {
            filterPattern = new ValuePattern(resourceId.getValue());
        } catch (ParseException pe) {
            Debug.debugException(pe);
            err(ERR_QUERY_TOOL_BAD_RESOURCE_ID_PATTERN.get(pe.getMessage()));
            return ResultCode.PARAM_ERROR;
        }
    } else {
        filterPattern = null;
    }

    // Get the attributes to return.
    final String[] attrs;
    if (attributes.isPresent()) {
        final List<String> attrList = attributes.getValues();
        attrs = new String[attrList.size()];
        attrList.toArray(attrs);
    } else {
        attrs = NO_STRINGS;
    }

    // If the --ratePerSecond option was specified, then limit the rate
    // accordingly.
    FixedRateBarrier fixedRateBarrier = null;
    if (ratePerSecond.isPresent()) {
        final int intervalSeconds = collectionInterval.getValue();
        final int ratePerInterval = ratePerSecond.getValue() * intervalSeconds;
        fixedRateBarrier = new FixedRateBarrier(1000L * intervalSeconds, ratePerInterval);
    }

    // Determine whether to include timestamps in the output and if so what
    // format should be used for them.
    final boolean includeTimestamp;
    final String timeFormat;
    if (timestampFormat.getValue().equalsIgnoreCase("with-date")) {
        includeTimestamp = true;
        timeFormat = "dd/MM/yyyy HH:mm:ss";
    } else if (timestampFormat.getValue().equalsIgnoreCase("without-date")) {
        includeTimestamp = true;
        timeFormat = "HH:mm:ss";
    } else {
        includeTimestamp = false;
        timeFormat = null;
    }

    // Determine whether any warm-up intervals should be run.
    final long totalIntervals;
    final boolean warmUp;
    int remainingWarmUpIntervals = warmUpIntervals.getValue();
    if (remainingWarmUpIntervals > 0) {
        warmUp = true;
        totalIntervals = 0L + numIntervals.getValue() + remainingWarmUpIntervals;
    } else {
        warmUp = true;
        totalIntervals = 0L + numIntervals.getValue();
    }

    // Create the table that will be used to format the output.
    final OutputFormat outputFormat;
    if (csvFormat.isPresent()) {
        outputFormat = OutputFormat.CSV;
    } else {
        outputFormat = OutputFormat.COLUMNS;
    }

    final ColumnFormatter formatter = new ColumnFormatter(includeTimestamp, timeFormat, outputFormat, " ",
            new FormattableColumn(15, HorizontalAlignment.RIGHT, "Recent", "Queries/Sec"),
            new FormattableColumn(15, HorizontalAlignment.RIGHT, "Recent", "Avg Dur ms"),
            new FormattableColumn(15, HorizontalAlignment.RIGHT, "Recent", "Resources/Query"),
            new FormattableColumn(15, HorizontalAlignment.RIGHT, "Recent", "Errors/Sec"),
            new FormattableColumn(15, HorizontalAlignment.RIGHT, "Overall", "Queries/Sec"),
            new FormattableColumn(15, HorizontalAlignment.RIGHT, "Overall", "Avg Dur ms"));

    // Create values to use for statistics collection.
    final AtomicLong queryCounter = new AtomicLong(0L);
    final AtomicLong resourceCounter = new AtomicLong(0L);
    final AtomicLong errorCounter = new AtomicLong(0L);
    final AtomicLong queryDurations = new AtomicLong(0L);

    // Determine the length of each interval in milliseconds.
    final long intervalMillis = 1000L * collectionInterval.getValue();

    // We will use Apache's HttpClient library for this tool.
    SSLUtil sslUtil;
    try {
        sslUtil = createSSLUtil();
    } catch (LDAPException e) {
        debugException(e);
        err(e.getMessage());
        return e.getResultCode();
    }

    RegistryBuilder<ConnectionSocketFactory> registryBuilder = RegistryBuilder.create();
    final String schemeName;
    if (sslUtil != null) {
        try {
            SSLConnectionSocketFactory sslConnectionSocketFactory = new SSLConnectionSocketFactory(
                    sslUtil.createSSLContext("TLS"), new NoopHostnameVerifier());
            schemeName = "https";
            registryBuilder.register(schemeName, sslConnectionSocketFactory);
        } catch (GeneralSecurityException e) {
            debugException(e);
            err(ERR_SCIM_TOOL_CANNOT_CREATE_SSL_CONTEXT.get(getExceptionMessage(e)));
            return ResultCode.LOCAL_ERROR;
        }
    } else {
        schemeName = "http";
        registryBuilder.register(schemeName, new PlainConnectionSocketFactory());
    }
    final Registry<ConnectionSocketFactory> socketFactoryRegistry = registryBuilder.build();

    RequestConfig requestConfig = RequestConfig.custom().setConnectionRequestTimeout(30000)
            .setExpectContinueEnabled(true).build();
    SocketConfig socketConfig = SocketConfig.custom().setSoTimeout(30000).setSoReuseAddress(true).build();

    final PoolingHttpClientConnectionManager mgr =
            new PoolingHttpClientConnectionManager(socketFactoryRegistry);
    mgr.setMaxTotal(numThreads.getValue());
    mgr.setDefaultMaxPerRoute(numThreads.getValue());
    mgr.setDefaultSocketConfig(socketConfig);
    mgr.setValidateAfterInactivity(-1);

    ClientConfig jerseyConfig = new ClientConfig();
    jerseyConfig.property(ApacheClientProperties.CONNECTION_MANAGER, mgr);
    jerseyConfig.property(ApacheClientProperties.REQUEST_CONFIG, requestConfig);
    ApacheConnectorProvider connectorProvider = new ApacheConnectorProvider();
    jerseyConfig.connectorProvider(connectorProvider);

    if (authID.isPresent()) {
        try {
            final String password;
            if (authPassword.isPresent()) {
                password = authPassword.getValue();
            } else if (authPasswordFile.isPresent()) {
                password = authPasswordFile.getNonBlankFileLines().get(0);
            } else {
                password = null;
            }
            BasicCredentialsProvider provider = new BasicCredentialsProvider();
            provider.setCredentials(new AuthScope(host.getValue(), port.getValue()),
                    new UsernamePasswordCredentials(authID.getValue(), password));
            jerseyConfig.property(ApacheClientProperties.CREDENTIALS_PROVIDER, provider);
            jerseyConfig.property(ApacheClientProperties.PREEMPTIVE_BASIC_AUTHENTICATION, true);
        } catch (IOException e) {
            Debug.debugException(e);
            err(ERR_QUERY_TOOL_SET_BASIC_AUTH.get(e.getMessage()));
            return ResultCode.LOCAL_ERROR;
        }
    } else if (bearerToken.isPresent()) {
        jerseyConfig.register(new ClientRequestFilter() {
            public void filter(final ClientRequestContext clientRequestContext) throws IOException {
                try {
                    clientRequestContext.getHeaders().add("Authorization",
                            "Bearer " + bearerToken.getValue());
                } catch (Exception ex) {
                    throw new RuntimeException("Unable to add authorization handler", ex);
                }
            }
        });
    }

    // Create the SCIM client to use for the queries.
    final URI uri;
    try {
        final String path;
        if (contextPath.getValue().startsWith("/")) {
            path = contextPath.getValue();
        } else {
            path = "/" + contextPath.getValue();
        }
        uri = new URI(schemeName, null, host.getValue(), port.getValue(), path, null, null);
    } catch (URISyntaxException e) {
        Debug.debugException(e);
        err(ERR_QUERY_TOOL_CANNOT_CREATE_URL.get(e.getMessage()));
        return ResultCode.OTHER;
    }
    final SCIMService service = new SCIMService(uri, jerseyConfig);

    if (xmlFormat.isPresent()) {
        service.setContentType(MediaType.APPLICATION_XML_TYPE);
        service.setAcceptType(MediaType.APPLICATION_XML_TYPE);
    }

    // Retrieve the resource schema.
    final ResourceDescriptor resourceDescriptor;
    try {
        resourceDescriptor = service.getResourceDescriptor(resourceName.getValue(), null);
        if (resourceDescriptor == null) {
            throw new ResourceNotFoundException(
                    "Resource " + resourceName.getValue() + " is not defined by the service provider");
        }
    } catch (SCIMException e) {
        Debug.debugException(e);
        err(ERR_QUERY_TOOL_RETRIEVE_RESOURCE_SCHEMA.get(e.getMessage()));
        return ResultCode.OTHER;
    }

    final SCIMEndpoint<? extends BaseResource> endpoint =
            service.getEndpoint(resourceDescriptor, BaseResource.BASE_RESOURCE_FACTORY);

    // Create the threads to use for the searches.
    final CyclicBarrier barrier = new CyclicBarrier(numThreads.getValue() + 1);
    final QueryRateThread[] threads = new QueryRateThread[numThreads.getValue()];
    for (int i = 0; i < threads.length; i++) {
        threads[i] = new QueryRateThread(i, isQuery, endpoint, filterPattern, attrs, barrier, queryCounter,
                resourceCounter, queryDurations, errorCounter, fixedRateBarrier);
        threads[i].start();
    }

    // Display the table header.
    for (final String headerLine : formatter.getHeaderLines(true)) {
        out(headerLine);
    }

    // Indicate that the threads can start running.
    try {
        barrier.await();
    } catch (Exception e) {
        Debug.debugException(e);
    }
    long overallStartTime = System.nanoTime();
    long nextIntervalStartTime = System.currentTimeMillis() + intervalMillis;

    boolean setOverallStartTime = false;
    long lastDuration = 0L;
    long lastNumEntries = 0L;
    long lastNumErrors = 0L;
    long lastNumSearches = 0L;
    long lastEndTime = System.nanoTime();
    for (long i = 0; i < totalIntervals; i++) {
        final long startTimeMillis = System.currentTimeMillis();
        final long sleepTimeMillis = nextIntervalStartTime - startTimeMillis;
        nextIntervalStartTime += intervalMillis;
        try {
            if (sleepTimeMillis > 0) {
                Thread.sleep(sleepTimeMillis);
            }
        } catch (Exception e) {
            Debug.debugException(e);
        }

        final long endTime = System.nanoTime();
        final long intervalDuration = endTime - lastEndTime;

        final long numSearches;
        final long numEntries;
        final long numErrors;
        final long totalDuration;
        if (warmUp && (remainingWarmUpIntervals > 0)) {
            numSearches = queryCounter.getAndSet(0L);
            numEntries = resourceCounter.getAndSet(0L);
            numErrors = errorCounter.getAndSet(0L);
            totalDuration = queryDurations.getAndSet(0L);
        } else {
            numSearches = queryCounter.get();
            numEntries = resourceCounter.get();
            numErrors = errorCounter.get();
            totalDuration = queryDurations.get();
        }

        final long recentNumSearches = numSearches - lastNumSearches;
        final long recentNumEntries = numEntries - lastNumEntries;
        final long recentNumErrors = numErrors - lastNumErrors;
        final long recentDuration = totalDuration - lastDuration;

        final double numSeconds = intervalDuration / 1000000000.0d;
        final double recentSearchRate = recentNumSearches / numSeconds;
        final double recentErrorRate = recentNumErrors / numSeconds;

        final double recentAvgDuration;
        final double recentEntriesPerSearch;
        if (recentNumSearches > 0L) {
            recentEntriesPerSearch = 1.0d * recentNumEntries / recentNumSearches;
            recentAvgDuration = 1.0d * recentDuration / recentNumSearches / 1000000;
        } else {
            recentEntriesPerSearch = 0.0d;
            recentAvgDuration = 0.0d;
        }

        if (warmUp && (remainingWarmUpIntervals > 0)) {
            out(formatter.formatRow(recentSearchRate, recentAvgDuration, recentEntriesPerSearch,
                    recentErrorRate, "warming up", "warming up"));
            remainingWarmUpIntervals--;
            if (remainingWarmUpIntervals == 0) {
                out(INFO_QUERY_TOOL_WARM_UP_COMPLETED.get());
                setOverallStartTime = true;
            }
        } else {
            if (setOverallStartTime) {
                overallStartTime = lastEndTime;
                setOverallStartTime = false;
            }
            final double numOverallSeconds = (endTime - overallStartTime) / 1000000000.0d;
            final double overallSearchRate = numSearches / numOverallSeconds;

            final double overallAvgDuration;
            if (numSearches > 0L) {
                overallAvgDuration = 1.0d * totalDuration / numSearches / 1000000;
            } else {
                overallAvgDuration = 0.0d;
            }

            out(formatter.formatRow(recentSearchRate, recentAvgDuration, recentEntriesPerSearch,
                    recentErrorRate, overallSearchRate, overallAvgDuration));

            lastNumSearches = numSearches;
            lastNumEntries = numEntries;
            lastNumErrors = numErrors;
            lastDuration = totalDuration;
        }

        lastEndTime = endTime;
    }

    // Stop all of the threads.
    ResultCode resultCode = ResultCode.SUCCESS;
    for (final QueryRateThread t : threads) {
        t.signalShutdown();
    }

    // Interrupt any blocked threads after a grace period.
    final WakeableSleeper sleeper = new WakeableSleeper();
    sleeper.sleep(1000);
    mgr.shutdown();

    for (final QueryRateThread t : threads) {
        final ResultCode r = t.waitForShutdown();
        if (resultCode == ResultCode.SUCCESS) {
            resultCode = r;
        }
    }

    return resultCode;
}
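Note the "+ 1" in new CyclicBarrier(numThreads.getValue() + 1): the coordinating thread counts itself as a party, so all worker threads are created and started first but none begins work until the coordinator has finished its setup (printing the table header) and called await() itself. A stripped-down sketch of that idiom follows; the class name, workerCount value, and printed messages are illustrative only, not taken from the SCIM tool.

import java.util.concurrent.CyclicBarrier;

public class CoordinatorBarrierSketch {

    public static void main(String[] args) throws Exception {
        final int workerCount = 4;                         // illustrative value
        // workers + the coordinating main thread
        final CyclicBarrier startSignal = new CyclicBarrier(workerCount + 1);

        for (int i = 0; i < workerCount; i++) {
            final int id = i;
            new Thread(() -> {
                try {
                    startSignal.await();                   // wait for the coordinator's go-ahead
                    System.out.println("worker " + id + " running");
                } catch (Exception e) {
                    Thread.currentThread().interrupt();
                }
            }).start();
        }

        System.out.println("coordinator: setup finished, releasing workers");
        startSignal.await();                               // the coordinator is the final party
    }
}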
From source file:org.apache.hadoop.yarn.server.resourcemanager.TestClientRMService.java
@Test(timeout = 4000)
public void testConcurrentAppSubmit()
        throws IOException, InterruptedException, BrokenBarrierException, YarnException, Exception {
    YarnScheduler yarnScheduler = mockYarnScheduler();
    RMContext rmContext = mock(RMContext.class);
    mockRMContext(yarnScheduler, rmContext);
    RMStateStore stateStore = mock(RMStateStore.class);
    when(rmContext.getStateStore()).thenReturn(stateStore);
    RMAppManager appManager = new RMAppManager(rmContext, yarnScheduler, null,
            mock(ApplicationACLsManager.class), new Configuration());

    final ApplicationId appId1 = getApplicationId(100);
    final ApplicationId appId2 = getApplicationId(101);
    final SubmitApplicationRequest submitRequest1 = mockSubmitAppRequest(appId1, null, null);
    final SubmitApplicationRequest submitRequest2 = mockSubmitAppRequest(appId2, null, null);

    final CyclicBarrier startBarrier = new CyclicBarrier(2);
    final CyclicBarrier endBarrier = new CyclicBarrier(2);

    @SuppressWarnings("rawtypes")
    EventHandler eventHandler = new EventHandler() {
        @Override
        public void handle(Event rawEvent) {
            if (rawEvent instanceof RMAppEvent) {
                RMAppEvent event = (RMAppEvent) rawEvent;
                if (event.getApplicationId().equals(appId1)) {
                    try {
                        startBarrier.await();
                        endBarrier.await();
                    } catch (BrokenBarrierException e) {
                        LOG.warn("Broken Barrier", e);
                    } catch (InterruptedException e) {
                        LOG.warn("Interrupted while awaiting barriers", e);
                    }
                }
            }
        }
    };

    when(rmContext.getDispatcher().getEventHandler()).thenReturn(eventHandler);

    final ClientRMService rmService = new ClientRMService(rmContext, yarnScheduler, appManager, null, null,
            null);
    rmService.serviceInit(new Configuration());

    // submit an app and wait for it to block while in app submission
    Thread t = new Thread() {
        @Override
        public void run() {
            try {
                rmService.submitApplication(submitRequest1);
            } catch (YarnException e) {
            }
        }
    };
    t.start();

    // submit another app, so go through while the first app is blocked
    startBarrier.await();
    rmService.submitApplication(submitRequest2);
    endBarrier.await();
    t.join();
}
From source file:de.hybris.platform.test.TransactionTest.java
private void testIsolationInternal(final boolean rollbackChanges) throws ConsistencyCheckException {
    final Country country = jaloSession.getC2LManager().createCountry("code1");
    try {
        assertEquals("code1", country.getIsoCode());
        final RunnerCreator<IsolationTestRunnable> creator = new RunnerCreator<TransactionTest.IsolationTestRunnable>() {
            final CyclicBarrier txSync = new CyclicBarrier(2);
            final CountDownLatch waitForWrite = new CountDownLatch(1);
            final CountDownLatch codeWritten = new CountDownLatch(1);

            @Override
            public IsolationTestRunnable newRunner(final int threadNumber) {
                return new IsolationTestRunnable(country, threadNumber == 0, rollbackChanges, waitForWrite,
                        codeWritten, txSync);
            }
        };
        final TestThreadsHolder<IsolationTestRunnable> threads = new TestThreadsHolder<TransactionTest.IsolationTestRunnable>(
                2, creator);
        threads.startAll();
        assertTrue("not all threads have finished properly", threads.waitAndDestroy(30));
        assertEquals(Collections.EMPTY_MAP, threads.getErrors());
    } finally {
        if (country != null && country.isAlive()) {
            try {
                country.remove();
            } catch (final Exception e) {
                // for several reasons this may fail without meaning harm so we ignore it here
            }
        }
    }
}
From source file:org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestCapacityScheduler.java
@Test(timeout = 30000)
public void testAllocateDoesNotBlockOnSchedulerLock() throws Exception {
    final YarnConfiguration conf = new YarnConfiguration();
    conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, ResourceScheduler.class);
    MyContainerManager containerManager = new MyContainerManager();
    final MockRMWithAMS rm = new MockRMWithAMS(conf, containerManager);
    rm.start();
    MockNM nm1 = rm.registerNode("localhost:1234", 5120);

    Map<ApplicationAccessType, String> acls = new HashMap<ApplicationAccessType, String>(2);
    acls.put(ApplicationAccessType.VIEW_APP, "*");
    RMApp app = rm.submitApp(1024, "appname", "appuser", acls);

    nm1.nodeHeartbeat(true);

    RMAppAttempt attempt = app.getCurrentAppAttempt();
    ApplicationAttemptId applicationAttemptId = attempt.getAppAttemptId();
    int msecToWait = 10000;
    int msecToSleep = 100;
    while (attempt.getAppAttemptState() != RMAppAttemptState.LAUNCHED && msecToWait > 0) {
        LOG.info("Waiting for AppAttempt to reach LAUNCHED state. " + "Current state is "
                + attempt.getAppAttemptState());
        Thread.sleep(msecToSleep);
        msecToWait -= msecToSleep;
    }
    Assert.assertEquals(attempt.getAppAttemptState(), RMAppAttemptState.LAUNCHED);

    // Create a client to the RM.
    final YarnRPC rpc = YarnRPC.create(conf);

    UserGroupInformation currentUser = UserGroupInformation.createRemoteUser(applicationAttemptId.toString());
    Credentials credentials = containerManager.getContainerCredentials();
    final InetSocketAddress rmBindAddress = rm.getApplicationMasterService().getBindAddress();
    Token<? extends TokenIdentifier> amRMToken = MockRMWithAMS.setupAndReturnAMRMToken(rmBindAddress,
            credentials.getAllTokens());
    currentUser.addToken(amRMToken);
    ApplicationMasterProtocol client = currentUser.doAs(new PrivilegedAction<ApplicationMasterProtocol>() {
        @Override
        public ApplicationMasterProtocol run() {
            return (ApplicationMasterProtocol) rpc.getProxy(ApplicationMasterProtocol.class, rmBindAddress,
                    conf);
        }
    });

    RegisterApplicationMasterRequest request = RegisterApplicationMasterRequest.newInstance("localhost",
            12345, "");
    client.registerApplicationMaster(request);

    // Allocate a container
    List<ResourceRequest> asks = Collections.singletonList(
            ResourceRequest.newInstance(Priority.newInstance(1), "*", Resources.createResource(2 * GB), 1));
    AllocateRequest allocateRequest = AllocateRequest.newInstance(0, 0.0f, asks, null, null);
    client.allocate(allocateRequest);

    // Make sure the container is allocated in RM
    nm1.nodeHeartbeat(true);
    ContainerId containerId2 = ContainerId.newContainerId(applicationAttemptId, 2);
    Assert.assertTrue(rm.waitForState(nm1, containerId2, RMContainerState.ALLOCATED, 10 * 1000));

    // Acquire the container
    allocateRequest = AllocateRequest.newInstance(1, 0.0f, null, null, null);
    client.allocate(allocateRequest);

    // Launch the container
    final CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
    RMContainer rmContainer = cs.getRMContainer(containerId2);
    rmContainer.handle(new RMContainerEvent(containerId2, RMContainerEventType.LAUNCHED));

    // grab the scheduler lock from another thread
    // and verify an allocate call in this thread doesn't block on it
    final CyclicBarrier barrier = new CyclicBarrier(2);
    Thread otherThread = new Thread(new Runnable() {
        @Override
        public void run() {
            synchronized (cs) {
                try {
                    barrier.await();
                    barrier.await();
                } catch (InterruptedException | BrokenBarrierException e) {
                    e.printStackTrace();
                }
            }
        }
    });
    otherThread.start();
    barrier.await();
    List<ContainerId> release = Collections.singletonList(containerId2);
    allocateRequest = AllocateRequest.newInstance(2, 0.0f, null, release, null);
    client.allocate(allocateRequest);
    barrier.await();
    otherThread.join();

    rm.stop();
}
From source file:org.apache.hadoop.yarn.server.nodemanager.TestNodeStatusUpdater.java
@Test(timeout = 200000)
public void testNodeStatusUpdaterRetryAndNMShutdown() throws Exception {
    final long connectionWaitSecs = 1000;
    final long connectionRetryIntervalMs = 1000;
    int port = ServerSocketUtil.getPort(49156, 10);
    YarnConfiguration conf = createNMConfig(port);
    conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS, connectionWaitSecs);
    conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS, connectionRetryIntervalMs);
    conf.setLong(YarnConfiguration.NM_SLEEP_DELAY_BEFORE_SIGKILL_MS, 5000);
    conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 1);
    CyclicBarrier syncBarrier = new CyclicBarrier(2);
    nm = new MyNodeManager2(syncBarrier, conf);
    nm.init(conf);
    nm.start();

    // start a container
    ContainerId cId = TestNodeManagerShutdown.createContainerId();
    FileContext localFS = FileContext.getLocalFSFileContext();
    TestNodeManagerShutdown.startContainer(nm, cId, localFS, nmLocalDir, new File("start_file.txt"), port);

    try {
        // Wait until we start stopping
        syncBarrier.await(10000, TimeUnit.MILLISECONDS);
        // Wait until we finish stopping
        syncBarrier.await(10000, TimeUnit.MILLISECONDS);
    } catch (Exception e) {
    }
    Assert.assertFalse("Containers not cleaned up when NM stopped", assertionFailedInThread.get());
    Assert.assertTrue(((MyNodeManager2) nm).isStopped);
    Assert.assertTrue("calculate heartBeatCount based on" + " connectionWaitSecs and RetryIntervalSecs",
            heartBeatID == 2);
}
From source file:org.eclipse.ecr.core.storage.sql.TestSQLBackend.java
protected static void runParallelLocking(Serializable nodeId, Repository repository1, Repository repository2)
        throws Throwable {
    CyclicBarrier barrier = new CyclicBarrier(2);
    CountDownLatch firstReady = new CountDownLatch(1);
    long TIME = 1000; // ms
    LockingJob r1 = new LockingJob(repository1, "t1-", nodeId, TIME, firstReady, barrier);
    LockingJob r2 = new LockingJob(repository2, "t2-", nodeId, TIME, null, barrier);
    Thread t1 = new Thread(r1, "t1");
    Thread t2 = new Thread(r2, "t2");
    t1.start();
    firstReady.await();
    t2.start();
    t1.join();
    t2.join();
    if (r1.throwable != null) {
        throw r1.throwable;
    }
    if (r2.throwable != null) {
        throw r2.throwable;
    }
    int count = r1.count + r2.count;
    log.warn("Parallel locks per second: " + count);
}
From source file:org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.TestResourceLocalizationService.java
@Test(timeout = 20000)
@SuppressWarnings("unchecked") // mocked generics
public void testFailedPublicResource() throws Exception {
    List<Path> localDirs = new ArrayList<Path>();
    String[] sDirs = new String[4];
    for (int i = 0; i < 4; ++i) {
        localDirs.add(lfs.makeQualified(new Path(basedir, i + "")));
        sDirs[i] = localDirs.get(i).toString();
    }
    conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS, sDirs);

    DrainDispatcher dispatcher = new DrainDispatcher();
    EventHandler<ApplicationEvent> applicationBus = mock(EventHandler.class);
    dispatcher.register(ApplicationEventType.class, applicationBus);
    EventHandler<ContainerEvent> containerBus = mock(EventHandler.class);
    dispatcher.register(ContainerEventType.class, containerBus);

    ContainerExecutor exec = mock(ContainerExecutor.class);
    DeletionService delService = mock(DeletionService.class);
    LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService();
    dirsHandler.init(conf);

    dispatcher.init(conf);
    dispatcher.start();

    try {
        ResourceLocalizationService rawService = new ResourceLocalizationService(dispatcher, exec, delService,
                dirsHandler, nmContext);
        ResourceLocalizationService spyService = spy(rawService);
        doReturn(mockServer).when(spyService).createServer();
        doReturn(lfs).when(spyService).getLocalFileContext(isA(Configuration.class));

        spyService.init(conf);
        spyService.start();

        final String user = "user0";
        final String userFolder = "user0Folder";
        // init application
        final Application app = mock(Application.class);
        final ApplicationId appId = BuilderUtils.newApplicationId(314159265358979L, 3);
        when(app.getUser()).thenReturn(user);
        when(app.getUserFolder()).thenReturn(userFolder);
        when(app.getAppId()).thenReturn(appId);
        spyService.handle(
                new ApplicationLocalizationEvent(LocalizationEventType.INIT_APPLICATION_RESOURCES, app));
        dispatcher.await();

        // init container.
        final Container c = getMockContainer(appId, 42, user, userFolder);

        // init resources
        Random r = new Random();
        long seed = r.nextLong();
        System.out.println("SEED: " + seed);
        r.setSeed(seed);

        // cause chmod to fail after a delay
        final CyclicBarrier barrier = new CyclicBarrier(2);
        doAnswer(new Answer<Void>() {
            public Void answer(InvocationOnMock invocation) throws IOException {
                try {
                    barrier.await();
                } catch (InterruptedException e) {
                } catch (BrokenBarrierException e) {
                }
                throw new IOException("forced failure");
            }
        }).when(spylfs).setPermission(isA(Path.class), isA(FsPermission.class));

        // Queue up two localization requests for the same public resource
        final LocalResource pubResource = getPublicMockedResource(r);
        final LocalResourceRequest pubReq = new LocalResourceRequest(pubResource);

        Map<LocalResourceVisibility, Collection<LocalResourceRequest>> req =
                new HashMap<LocalResourceVisibility, Collection<LocalResourceRequest>>();
        req.put(LocalResourceVisibility.PUBLIC, Collections.singletonList(pubReq));

        Set<LocalResourceRequest> pubRsrcs = new HashSet<LocalResourceRequest>();
        pubRsrcs.add(pubReq);

        spyService.handle(new ContainerLocalizationRequestEvent(c, req));
        spyService.handle(new ContainerLocalizationRequestEvent(c, req));
        dispatcher.await();

        // allow the chmod to fail now that both requests have been queued
        barrier.await();

        verify(containerBus, timeout(5000).times(2)).handle(isA(ContainerResourceFailedEvent.class));
    } finally {
        dispatcher.stop();
    }
}
From source file:org.apache.bookkeeper.mledger.impl.ManagedLedgerTest.java
@Test
public void testConcurrentOpenCursor() throws Exception {
    ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("testConcurrentOpenCursor");

    final AtomicReference<ManagedCursor> cursor1 = new AtomicReference<>(null);
    final AtomicReference<ManagedCursor> cursor2 = new AtomicReference<>(null);
    final CyclicBarrier barrier = new CyclicBarrier(2);
    final CountDownLatch latch = new CountDownLatch(2);

    cachedExecutor.execute(() -> {
        try {
            barrier.await();
        } catch (Exception e) {
        }
        ledger.asyncOpenCursor("c1", new OpenCursorCallback() {
            @Override
            public void openCursorFailed(ManagedLedgerException exception, Object ctx) {
                latch.countDown();
            }

            @Override
            public void openCursorComplete(ManagedCursor cursor, Object ctx) {
                cursor1.set(cursor);
                latch.countDown();
            }
        }, null);
    });

    cachedExecutor.execute(() -> {
        try {
            barrier.await();
        } catch (Exception e) {
        }
        ledger.asyncOpenCursor("c1", new OpenCursorCallback() {
            @Override
            public void openCursorFailed(ManagedLedgerException exception, Object ctx) {
                latch.countDown();
            }

            @Override
            public void openCursorComplete(ManagedCursor cursor, Object ctx) {
                cursor2.set(cursor);
                latch.countDown();
            }
        }, null);
    });

    latch.await();
    assertNotNull(cursor1.get());
    assertNotNull(cursor2.get());
    assertEquals(cursor1.get(), cursor2.get());

    ledger.close();
}