Usage examples for java.util.concurrent.ScheduledExecutorService.shutdown(), collected from open-source projects.
void shutdown();
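shutdown() initiates an orderly shutdown in which previously submitted tasks are executed, but no new tasks are accepted. It returns immediately and does not wait for running tasks to complete, so the examples below typically pair it with awaitTermination() and, as a last resort, shutdownNow(). A minimal sketch of that pattern (the class name and delays are illustrative, not taken from any of the examples below):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class GracefulShutdownSketch {
    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        // A periodic task; by default it stops being rescheduled once shutdown() is called.
        scheduler.scheduleAtFixedRate(() -> System.out.println("tick"), 0, 100, TimeUnit.MILLISECONDS);

        Thread.sleep(350);

        // Initiate an orderly shutdown: no new tasks are accepted from here on.
        scheduler.shutdown();
        // shutdown() does not block, so wait for termination and escalate if needed.
        if (!scheduler.awaitTermination(5, TimeUnit.SECONDS)) {
            scheduler.shutdownNow(); // best-effort cancellation of anything still running
        }
    }
}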
From source file:org.apache.hadoop.hbase.test.IntegrationTestTimeBoundedRequestsWithRegionReplicas.java
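Here shutdown() releases the single-threaded scheduled executor that was used to delay the start of a ChaosMonkey; it runs in the finally block once the timed read workload has finished.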
@Override
protected void runIngestTest(long defaultRunTime, long keysPerServerPerIter, int colsPerKey, int recordSize,
        int writeThreads, int readThreads) throws Exception {
    LOG.info("Cluster size:" + util.getHBaseClusterInterface().getClusterStatus().getServersSize());

    long start = System.currentTimeMillis();
    String runtimeKey = String.format(RUN_TIME_KEY, this.getClass().getSimpleName());
    long runtime = util.getConfiguration().getLong(runtimeKey, defaultRunTime);
    long startKey = 0;

    long numKeys = getNumKeys(keysPerServerPerIter);

    // write data once
    LOG.info("Writing some data to the table");
    writeData(colsPerKey, recordSize, writeThreads, startKey, numKeys);

    // flush the table
    LOG.info("Flushing the table");
    Admin admin = util.getHBaseAdmin();
    admin.flush(getTablename());

    // re-open the regions to make sure that the replicas are up to date
    long refreshTime = conf.getLong(StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD, 0);
    if (refreshTime > 0 && refreshTime <= 10000) {
        LOG.info("Sleeping " + refreshTime + "ms to ensure that the data is replicated");
        Threads.sleep(refreshTime * 3);
    } else {
        LOG.info("Reopening the table");
        admin.disableTable(getTablename());
        admin.enableTable(getTablename());
    }

    // We should only start the ChaosMonkey after the readers are started and have cached
    // all of the region locations. Because the meta is not replicated, the timebounded reads
    // will timeout if meta server is killed.
    // We will start the chaos monkey after 1 minute, and since the readers are reading random
    // keys, it should be enough to cache every region entry.
    long chaosMonkeyDelay = conf.getLong(String.format("%s.%s", TEST_NAME, CHAOS_MONKEY_DELAY_KEY),
            DEFAUL_CHAOS_MONKEY_DELAY);
    ScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor();
    LOG.info(String.format("ChaosMonkey delay is : %d seconds. Will start %s ChaosMonkey after delay",
            chaosMonkeyDelay / 1000, monkeyToUse));
    ScheduledFuture<?> result = executorService.schedule(new Runnable() {
        @Override
        public void run() {
            try {
                LOG.info("Starting ChaosMonkey");
                monkey.start();
                monkey.waitForStop();
            } catch (Exception e) {
                LOG.warn(StringUtils.stringifyException(e));
            }
        }
    }, chaosMonkeyDelay, TimeUnit.MILLISECONDS);

    // set the intended run time for the reader. The reader will do read requests
    // to random keys for this amount of time.
    long remainingTime = runtime - (System.currentTimeMillis() - start);
    LOG.info("Reading random keys from the table for " + remainingTime / 60000 + " min");
    this.conf.setLong(String.format(RUN_TIME_KEY, TimeBoundedMultiThreadedReader.class.getSimpleName()),
            remainingTime); // load tool shares the same conf

    // now start the readers which will run for configured run time
    try {
        int ret = loadTool
                .run(getArgsForLoadTestTool("-read", String.format("100:%d", readThreads), startKey, numKeys));
        if (0 != ret) {
            String errorMsg = "Verification failed with error code " + ret;
            LOG.error(errorMsg);
            Assert.fail(errorMsg);
        }
    } finally {
        if (result != null) {
            result.cancel(false);
        }
        monkey.stop("Stopping the test");
        monkey.waitForStop();
        executorService.shutdown();
    }
}
From source file:fr.tpt.atlanalyser.examples.ExampleRunner.java
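This example schedules a heap-usage monitor at a fixed 10-second rate while a long-running Post2Pre computation executes, then stops it with shutdown() in a finally block.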
public void executePost2Pre(File postFile, int maxNumRuleIterations) throws IOException {
    // checkDirs();
    inputMM = getInputMMEPkg();
    outputMM = getOutputMMEPkg();
    makeAbstractClassesInstantiable(inputMM);
    makeAbstractClassesInstantiable(outputMM);

    Module transfo = loadHenshinTransformation();
    EcoreUtil.resolveAll(transfo);
    EPackage traceMM = resourceSet.getPackageRegistry().getEPackage("http://traces/1.0");

    stripFromAttributes(transfo);

    Resource postRes = resourceSet.getResource(URI.createFileURI(postFile.getCanonicalPath()), true);
    Module postModule = (Module) postRes.getContents().get(0);
    EList<Unit> units = postModule.getUnits();
    List<Formula> postconditions = Lists.transform(units, new Function<Unit, Formula>() {
        @Override
        public Formula apply(Unit arg0) {
            return ((Rule) arg0).getLhs().getFormula();
        }
    });

    Module preModule = HenshinFactory.eINSTANCE.createModule();
    preModule.setName("Preconditions");

    LOGGER.info("Starting Post2Pre for {}", transfo.getName());

    Post2Pre4ATL post2Pre = new Post2Pre4ATL(transfo, inputMM, outputMM, traceMM, jobs);

    ScheduledExecutorService memMonitorExecutor = Executors.newScheduledThreadPool(1);
    memMonitorExecutor.scheduleAtFixedRate(new Runnable() {
        private final MemoryMXBean memoryBean = ManagementFactory.getMemoryMXBean();
        private final Logger LOGGER = LogManager.getLogger("MemMon");

        @Override
        public void run() {
            MemoryUsage heapUsage = memoryBean.getHeapMemoryUsage();
            final long used = heapUsage.getUsed();
            double usedGB = (double) used / (1 << 30);
            final long max = heapUsage.getMax();
            double maxGB = (double) max / (1 << 30);
            LOGGER.info(String.format("Memory use : %6.3fGB of %6.3fGB (%.2f%%)", usedGB, maxGB,
                    (float) used / max * 100));
        }
    }, 0, 10, TimeUnit.SECONDS);

    try {
        for (Formula formula : postconditions) {
            Formula pre = post2Pre.post2Pre(formula, maxNumRuleIterations);

            Rule preRule = HenshinFactory.eINSTANCE.createRule();
            Graph preLhs = HenshinFactory.eINSTANCE.createGraph();
            preLhs.setFormula(EcoreUtil.copy(pre));
            preRule.setLhs(preLhs);
            preModule.getUnits().add(preRule);
        }
    } finally {
        memMonitorExecutor.shutdown();
    }

    File outputDir = new File(baseDir, "Preconditions");
    if (!outputDir.isDirectory()) {
        outputDir.mkdir();
    }
    Resource outputRes = resourceSet
            .createResource(URI.createFileURI("Preconditions/" + postFile.getName() + "_pre.henshin"));
    outputRes.getContents().add(preModule);
    LOGGER.info("Writing Precondition in {}", outputRes.getURI().toString());
    outputRes.save(xmiSaveOptions);
    LOGGER.info("Done!");
}
From source file:org.apache.jackrabbit.core.RepositoryImpl.java
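Jackrabbit's repository shutdown shows the full idiom: shutdown(), then awaitTermination() with a timeout, then shutdownNow() for any runaway threads.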
/**
 * Protected method that performs the actual shutdown after the shutdown
 * lock has been acquired by the {@link #shutdown()} method.
 */
protected synchronized void doShutdown() {
    log.info("Shutting down repository...");

    // stop optional cluster node
    ClusterNode clusterNode = context.getClusterNode();
    if (clusterNode != null) {
        clusterNode.stop();
    }

    if (securityMgr != null) {
        securityMgr.close();
    }

    // close active user sessions
    // (copy sessions to array to avoid ConcurrentModificationException;
    // manually copy entries rather than calling ReferenceMap#toArray() in
    // order to work around http://issues.apache.org/bugzilla/show_bug.cgi?id=25551)
    List<Session> sa;
    synchronized (activeSessions) {
        sa = new ArrayList<Session>(activeSessions.size());
        for (Session session : activeSessions.values()) {
            sa.add(session);
        }
    }
    for (Session session : sa) {
        if (session != null) {
            session.logout();
        }
    }

    // shutdown system search manager if there is one
    if (systemSearchMgr != null) {
        systemSearchMgr.close();
    }

    // shut down workspaces
    synchronized (wspInfos) {
        for (WorkspaceInfo wspInfo : wspInfos.values()) {
            wspInfo.dispose();
        }
    }

    try {
        InternalVersionManager m = context.getInternalVersionManager();
        if (m != null) {
            m.close();
        }
    } catch (Exception e) {
        log.error("Error while closing Version Manager.", e);
    }

    repDescriptors.clear();

    DataStore dataStore = context.getDataStore();
    if (dataStore != null) {
        try {
            // close the datastore
            dataStore.close();
        } catch (DataStoreException e) {
            log.error("error while closing datastore", e);
        }
    }

    try {
        // close repository file system
        context.getFileSystem().close();
    } catch (FileSystemException e) {
        log.error("error while closing repository file system", e);
    }

    try {
        nodeIdFactory.close();
    } catch (RepositoryException e) {
        log.error("error while closing node id factory", e);
    }

    // make sure this instance is not used anymore
    disposed = true;

    // wake up threads waiting on this instance's monitor (e.g. workspace janitor)
    notifyAll();

    // Shut down the executor service
    ScheduledExecutorService executor = context.getExecutor();
    executor.shutdown();
    try {
        // Wait for all remaining background threads to terminate
        if (!executor.awaitTermination(10, TimeUnit.SECONDS)) {
            log.warn("Attempting to forcibly shutdown runaway threads");
            executor.shutdownNow();
        }
    } catch (InterruptedException e) {
        log.warn("Interrupted while waiting for background threads", e);
    }

    repConfig.getConnectionFactory().close();

    // finally release repository lock
    if (repLock != null) {
        try {
            repLock.release();
        } catch (RepositoryException e) {
            log.error("failed to release the repository lock", e);
        }
    }

    log.info("Repository has been shutdown");
}
From source file:com.linkedin.pinot.transport.netty.NettySingleConnectionIntegrationTest.java
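A connection-pool validation test; cleanup calls shutdown() on both the worker executor and the scheduled timeout executor after shutting the pool down.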
@Test
public void testValidatePool() throws Exception {
    PooledNettyClientResourceManager resourceManager = new PooledNettyClientResourceManager(
            new NioEventLoopGroup(), new HashedWheelTimer(), new NettyClientMetrics(null, "abc"));
    ExecutorService executorService = Executors.newCachedThreadPool();
    ScheduledExecutorService timeoutExecutor = new ScheduledThreadPoolExecutor(5);
    String serverName = "server";
    MetricsRegistry metricsRegistry = new MetricsRegistry();
    AsyncPoolResourceManagerAdapter<PooledNettyClientResourceManager.PooledClientConnection> rmAdapter =
            new AsyncPoolResourceManagerAdapter<>(_clientServer, resourceManager, executorService, metricsRegistry);
    AsyncPool<PooledNettyClientResourceManager.PooledClientConnection> pool = new AsyncPoolImpl<>(serverName,
            rmAdapter, /*maxSize=*/5, /*idleTimeoutMs=*/100000L, timeoutExecutor, executorService,
            /*maxWaiters=*/10, AsyncPoolImpl.Strategy.LRU, /*minSize=*/2, metricsRegistry);

    try {
        pool.start();
        Uninterruptibles.sleepUninterruptibly(1L, TimeUnit.SECONDS);

        // Test no connection in pool
        Assert.assertTrue(pool.validate(false));
        PoolStats stats = pool.getStats();
        Assert.assertEquals(stats.getPoolSize(), 2);
        Assert.assertEquals(stats.getTotalBadDestroyed(), 0);
        Assert.assertEquals(stats.getCheckedOut(), 0);

        // Test one connection, it should not destroy anything
        AsyncResponseFuture<PooledNettyClientResourceManager.PooledClientConnection> responseFuture =
                new AsyncResponseFuture<>(_clientServer, null);
        pool.get(responseFuture);
        Assert.assertNotNull(responseFuture.getOne());
        Assert.assertTrue(pool.validate(false));
        stats = pool.getStats();
        Assert.assertEquals(stats.getPoolSize(), 2);
        Assert.assertEquals(stats.getTotalBadDestroyed(), 0);
        Assert.assertEquals(stats.getCheckedOut(), 1);

        // Now stop the server, so that the checked out connection is invalidated
        closeServerConnection();
        Assert.assertTrue(pool.validate(false));
        stats = pool.getStats();
        Assert.assertEquals(stats.getPoolSize(), 2);
        Assert.assertEquals(stats.getTotalBadDestroyed(), 1);
        Assert.assertEquals(stats.getCheckedOut(), 1);
    } finally {
        pool.shutdown(new Callback<NoneType>() {
            @Override
            public void onSuccess(NoneType arg0) {
            }

            @Override
            public void onError(Throwable arg0) {
                Assert.fail("Shutdown error");
            }
        });
        executorService.shutdown();
        timeoutExecutor.shutdown();
    }
}
From source file:com.palantir.atlasdb.transaction.impl.SnapshotTransactionTest.java
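Thirty transactions are scheduled with staggered delays, then shutdown() followed by awaitTermination() lets them drain before the atomicity check.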
@Test
public void testTransactionAtomicity() throws Exception {
    // This test runs multiple transactions in parallel, with KeyValueService.put calls throwing
    // a RuntimeException from time to time and hanging other times, which effectively kills the
    // thread. We ensure that every transaction either adds 5 rows to the table or adds 0 rows
    // by checking at the end that the number of rows is a multiple of 5.
    final String tableName = "table";
    Random random = new Random(1);

    final UnstableKeyValueService unstableKvs = new UnstableKeyValueService(keyValueService, random);
    final TestTransactionManager unstableTransactionManager = new TestTransactionManagerImpl(unstableKvs,
            timestampService, lockClient, lockService, transactionService, conflictDetectionManager,
            sweepStrategyManager);

    ScheduledExecutorService service = PTExecutors.newScheduledThreadPool(20);
    for (int i = 0; i < 30; i++) {
        final int threadNumber = i;
        service.schedule(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                if (threadNumber == 10) {
                    unstableKvs.setRandomlyThrow(true);
                }
                if (threadNumber == 20) {
                    unstableKvs.setRandomlyHang(true);
                }

                Transaction transaction = unstableTransactionManager.createNewTransaction();
                BatchingVisitable<RowResult<byte[]>> results = transaction.getRange(tableName,
                        RangeRequest.builder().build());

                final MutableInt nextIndex = new MutableInt(0);
                results.batchAccept(1,
                        AbortingVisitors.batching(new AbortingVisitor<RowResult<byte[]>, Exception>() {
                            @Override
                            public boolean visit(RowResult<byte[]> row) throws Exception {
                                byte[] dataBytes = row.getColumns().get("data".getBytes());
                                BigInteger dataValue = new BigInteger(dataBytes);
                                nextIndex.setValue(Math.max(nextIndex.toInteger(), dataValue.intValue() + 1));
                                return true;
                            }
                        }));

                // nextIndex now contains the least row number not already in the table. Add 5 more
                // rows to the table.
                for (int j = 0; j < 5; j++) {
                    int rowNumber = nextIndex.toInteger() + j;
                    Cell cell = Cell.create(("row" + rowNumber).getBytes(), "data".getBytes());
                    transaction.put(tableName,
                            ImmutableMap.of(cell, BigInteger.valueOf(rowNumber).toByteArray()));
                    Thread.yield();
                }
                transaction.commit();
                return null;
            }
        }, i * 20, TimeUnit.MILLISECONDS);
    }

    service.shutdown();
    service.awaitTermination(1, TimeUnit.SECONDS);

    // Verify each table has a number of rows that's a multiple of 5
    Transaction verifyTransaction = txManager.createNewTransaction();
    BatchingVisitable<RowResult<byte[]>> results = verifyTransaction.getRange(tableName,
            RangeRequest.builder().build());

    final MutableInt numRows = new MutableInt(0);
    results.batchAccept(1, AbortingVisitors.batching(new AbortingVisitor<RowResult<byte[]>, Exception>() {
        @Override
        public boolean visit(RowResult<byte[]> row) throws Exception {
            numRows.increment();
            return true;
        }
    }));

    Assert.assertEquals(0, numRows.toInteger() % 5);
}
From source file:org.elasticsearch.client.sniff.SnifferTests.java
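The executor driving the Sniffer tasks is torn down in the finally block with shutdown() followed by awaitTermination().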
public void testTaskCancelling() throws Exception {
    RestClient restClient = mock(RestClient.class);
    HostsSniffer hostsSniffer = mock(HostsSniffer.class);
    Scheduler noOpScheduler = new Scheduler() {
        @Override
        public Future<?> schedule(Sniffer.Task task, long delayMillis) {
            return null;
        }

        @Override
        public void shutdown() {
        }
    };
    Sniffer sniffer = new Sniffer(restClient, hostsSniffer, noOpScheduler, 0L, 0L);
    ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
    try {
        int numIters = randomIntBetween(50, 100);
        for (int i = 0; i < numIters; i++) {
            Sniffer.Task task = sniffer.new Task(0L);
            TaskWrapper wrapper = new TaskWrapper(task);
            Future<?> future;
            if (rarely()) {
                future = executor.schedule(wrapper, randomLongBetween(0L, 200L), TimeUnit.MILLISECONDS);
            } else {
                future = executor.submit(wrapper);
            }
            Sniffer.ScheduledTask scheduledTask = new Sniffer.ScheduledTask(task, future);
            boolean skip = scheduledTask.skip();
            try {
                assertNull(future.get());
            } catch (CancellationException ignore) {
                assertTrue(future.isCancelled());
            }
            if (skip) {
                // the task was either cancelled before starting, in which case it will never start (thanks to Future#cancel),
                // or skipped, in which case it will run but do nothing (thanks to Task#skip).
                // Here we want to make sure that whenever skip returns true, the task either won't run or it won't do anything,
                // otherwise we may end up with parallel sniffing tracks given that each task schedules the following one.
                // We need to make sure that onFailure takes scheduling over while at the same time ordinary rounds don't go on.
                assertFalse(task.hasStarted());
                assertTrue(task.isSkipped());
                assertTrue(future.isCancelled());
                assertTrue(future.isDone());
            } else {
                // if a future is cancelled when its execution has already started, future#get throws CancellationException before
                // completion. The execution continues though so we use a latch to try and wait for the task to be completed.
                // Here we want to make sure that whenever skip returns false, the task will be completed, otherwise we may be
                // missing to schedule the following round, which means no sniffing will ever happen again besides on failure sniffing.
                assertTrue(wrapper.await());
                // the future may or may not be cancelled but the task has for sure started and completed
                assertTrue(task.toString(), task.hasStarted());
                assertFalse(task.isSkipped());
                assertTrue(future.isDone());
            }
            // subsequent cancel calls return false for sure
            int cancelCalls = randomIntBetween(1, 10);
            for (int j = 0; j < cancelCalls; j++) {
                assertFalse(scheduledTask.skip());
            }
        }
    } finally {
        executor.shutdown();
        executor.awaitTermination(1000, TimeUnit.MILLISECONDS);
    }
}
From source file:com.linkedin.pinot.transport.netty.NettySingleConnectionIntegrationTest.java
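Another pool test with the same cleanup: the keyed pool is shut down first, then shutdown() is called on the worker and timeout executors.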
/**
 * This test attempts to use the connection mechanism the same way as ScatterGatherImpl.SingleRequestHandler does.
 */
@SuppressWarnings("unchecked")
@Test
public void testServerShutdownLeak() throws Exception {
    PooledNettyClientResourceManager resourceManager = new PooledNettyClientResourceManager(
            new NioEventLoopGroup(), new HashedWheelTimer(), new NettyClientMetrics(null, "abc"));
    ExecutorService executorService = Executors.newCachedThreadPool();
    ScheduledExecutorService timeoutExecutor = new ScheduledThreadPoolExecutor(5);
    KeyedPool<PooledNettyClientResourceManager.PooledClientConnection> keyedPool = new KeyedPoolImpl<>(
            /*minSize=*/2, /*maxSize=*/3, /*idleTimeoutMs=*/100000L, /*maxPending=*/1, resourceManager,
            timeoutExecutor, executorService, new MetricsRegistry());
    resourceManager.setPool(keyedPool);

    try {
        keyedPool.start();

        // The act of calling checkoutObject() creates a new AsyncPool and places a request for a new connection
        // NOTE: since no connections are available in the beginning, and the min connections are still being filled,
        // we always end up creating one more connection than the min connections
        Assert.assertNotNull(keyedPool.checkoutObject(_clientServer).getOne());

        // Use reflection to get the pool and the waiters queue
        Field keyedPoolField = KeyedPoolImpl.class.getDeclaredField("_keyedPool");
        keyedPoolField.setAccessible(true);
        Map<ServerInstance, AsyncPool<NettyClientConnection>> poolMap =
                (Map<ServerInstance, AsyncPool<NettyClientConnection>>) keyedPoolField.get(keyedPool);
        AsyncPool<NettyClientConnection> pool = poolMap.get(_clientServer);
        Field waitersField = AsyncPoolImpl.class.getDeclaredField("_waiters");
        waitersField.setAccessible(true);
        LinkedDequeue waitersQueue = (LinkedDequeue) waitersField.get(pool);

        // Make sure the min pool size is filled out
        Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
        PoolStats stats = pool.getStats();
        Assert.assertEquals(stats.getPoolSize(), 3);
        Assert.assertEquals(stats.getIdleCount(), 2);
        Assert.assertEquals(stats.getCheckedOut(), 1);
        Assert.assertEquals(waitersQueue.size(), 0);

        // Get two more connections to the server and leak them
        Assert.assertNotNull(keyedPool.checkoutObject(_clientServer).getOne());
        Assert.assertNotNull(keyedPool.checkoutObject(_clientServer).getOne());
        stats = pool.getStats();
        Assert.assertEquals(stats.getPoolSize(), 3);
        Assert.assertEquals(stats.getIdleCount(), 0);
        Assert.assertEquals(stats.getCheckedOut(), 3);
        Assert.assertEquals(waitersQueue.size(), 0);

        // Try to get one more connection
        // We should get an exception because we don't have a free connection to the server
        ServerResponseFuture<PooledNettyClientResourceManager.PooledClientConnection> serverResponseFuture =
                keyedPool.checkoutObject(_clientServer);
        try {
            serverResponseFuture.getOne(1, TimeUnit.SECONDS);
            Assert.fail("Get connection even no connections available");
        } catch (TimeoutException e) {
            // PASS
        }
        stats = pool.getStats();
        Assert.assertEquals(stats.getPoolSize(), 3);
        Assert.assertEquals(stats.getIdleCount(), 0);
        Assert.assertEquals(stats.getCheckedOut(), 3);
        Assert.assertEquals(waitersQueue.size(), 1);
        serverResponseFuture.cancel(true);
        Assert.assertEquals(waitersQueue.size(), 0);

        // If the server goes down, we should release all 3 connections and be able to get new connections
        closeServerConnection();
        setUp();
        stats = pool.getStats();
        Assert.assertEquals(stats.getPoolSize(), 2);
        Assert.assertEquals(stats.getIdleCount(), 2);

        // Try to get 3 new connections
        for (int i = 0; i < 3; i++) {
            Assert.assertNotNull(keyedPool.checkoutObject(_clientServer).getOne());
        }
    } finally {
        keyedPool.shutdown();
        executorService.shutdown();
        timeoutExecutor.shutdown();
    }
}
From source file:org.elasticsearch.client.sniff.SnifferTests.java
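A custom Scheduler delegates to a single-threaded scheduled executor; the finally block shuts it down and awaits termination.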
/**
 * Test behaviour when a bunch of onFailure sniffing rounds are triggered in parallel. Each run will always
 * schedule a subsequent afterFailure round. Also, for each onFailure round that starts, the next scheduled round
 * (either afterFailure or ordinary) gets cancelled.
 */
public void testSniffOnFailure() throws Exception {
    RestClient restClient = mock(RestClient.class);
    CountingHostsSniffer hostsSniffer = new CountingHostsSniffer();
    final AtomicBoolean initializing = new AtomicBoolean(true);
    final long sniffInterval = randomLongBetween(1, Long.MAX_VALUE);
    final long sniffAfterFailureDelay = randomLongBetween(1, Long.MAX_VALUE);
    int minNumOnFailureRounds = randomIntBetween(5, 10);
    final CountDownLatch initializingLatch = new CountDownLatch(1);
    final Set<Sniffer.ScheduledTask> ordinaryRoundsTasks = new CopyOnWriteArraySet<>();
    final AtomicReference<Future<?>> initializingFuture = new AtomicReference<>();
    final Set<Sniffer.ScheduledTask> onFailureTasks = new CopyOnWriteArraySet<>();
    final Set<Sniffer.ScheduledTask> afterFailureTasks = new CopyOnWriteArraySet<>();
    final AtomicBoolean onFailureCompleted = new AtomicBoolean(false);
    final CountDownLatch completionLatch = new CountDownLatch(1);
    final ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
    try {
        Scheduler scheduler = new Scheduler() {
            @Override
            public Future<?> schedule(final Sniffer.Task task, long delayMillis) {
                if (initializing.compareAndSet(true, false)) {
                    assertEquals(0L, delayMillis);
                    Future<?> future = executor.submit(new Runnable() {
                        @Override
                        public void run() {
                            try {
                                task.run();
                            } finally {
                                // we need to make sure that the sniffer is initialized, so the sniffOnFailure
                                // call does what it needs to do. Otherwise nothing happens until initialized.
                                initializingLatch.countDown();
                            }
                        }
                    });
                    assertTrue(initializingFuture.compareAndSet(null, future));
                    return future;
                }
                if (delayMillis == 0L) {
                    Future<?> future = executor.submit(task);
                    onFailureTasks.add(new Sniffer.ScheduledTask(task, future));
                    return future;
                }
                if (delayMillis == sniffAfterFailureDelay) {
                    Future<?> future = scheduleOrSubmit(task);
                    afterFailureTasks.add(new Sniffer.ScheduledTask(task, future));
                    return future;
                }

                assertEquals(sniffInterval, delayMillis);
                assertEquals(sniffInterval, task.nextTaskDelay);

                if (onFailureCompleted.get() && onFailureTasks.size() == afterFailureTasks.size()) {
                    completionLatch.countDown();
                    return mock(Future.class);
                }

                Future<?> future = scheduleOrSubmit(task);
                ordinaryRoundsTasks.add(new Sniffer.ScheduledTask(task, future));
                return future;
            }

            private Future<?> scheduleOrSubmit(Sniffer.Task task) {
                if (randomBoolean()) {
                    return executor.schedule(task, randomLongBetween(0L, 200L), TimeUnit.MILLISECONDS);
                } else {
                    return executor.submit(task);
                }
            }

            @Override
            public void shutdown() {
            }
        };
        final Sniffer sniffer = new Sniffer(restClient, hostsSniffer, scheduler, sniffInterval,
                sniffAfterFailureDelay);
        assertTrue("timeout waiting for sniffer to get initialized",
                initializingLatch.await(1000, TimeUnit.MILLISECONDS));

        ExecutorService onFailureExecutor = Executors.newFixedThreadPool(randomIntBetween(5, 20));
        Set<Future<?>> onFailureFutures = new CopyOnWriteArraySet<>();
        try {
            // with tasks executing quickly one after each other, it is very likely that the onFailure round gets skipped
            // as another round is already running. We retry till enough runs get through as that's what we want to test.
            while (onFailureTasks.size() < minNumOnFailureRounds) {
                onFailureFutures.add(onFailureExecutor.submit(new Runnable() {
                    @Override
                    public void run() {
                        sniffer.sniffOnFailure();
                    }
                }));
            }
            assertThat(onFailureFutures.size(), greaterThanOrEqualTo(minNumOnFailureRounds));
            for (Future<?> onFailureFuture : onFailureFutures) {
                assertNull(onFailureFuture.get());
            }
            onFailureCompleted.set(true);
        } finally {
            onFailureExecutor.shutdown();
            onFailureExecutor.awaitTermination(1000, TimeUnit.MILLISECONDS);
        }

        assertFalse(initializingFuture.get().isCancelled());
        assertTrue(initializingFuture.get().isDone());
        assertNull(initializingFuture.get().get());

        assertTrue("timeout waiting for sniffing rounds to be completed",
                completionLatch.await(1000, TimeUnit.MILLISECONDS));
        assertThat(onFailureTasks.size(), greaterThanOrEqualTo(minNumOnFailureRounds));
        assertEquals(onFailureTasks.size(), afterFailureTasks.size());

        for (Sniffer.ScheduledTask onFailureTask : onFailureTasks) {
            assertFalse(onFailureTask.future.isCancelled());
            assertTrue(onFailureTask.future.isDone());
            assertNull(onFailureTask.future.get());
            assertTrue(onFailureTask.task.hasStarted());
            assertFalse(onFailureTask.task.isSkipped());
        }

        int cancelledTasks = 0;
        int completedTasks = onFailureTasks.size() + 1;
        for (Sniffer.ScheduledTask afterFailureTask : afterFailureTasks) {
            if (assertTaskCancelledOrCompleted(afterFailureTask)) {
                completedTasks++;
            } else {
                cancelledTasks++;
            }
        }
        assertThat(ordinaryRoundsTasks.size(), greaterThan(0));
        for (Sniffer.ScheduledTask task : ordinaryRoundsTasks) {
            if (assertTaskCancelledOrCompleted(task)) {
                completedTasks++;
            } else {
                cancelledTasks++;
            }
        }
        assertEquals(onFailureTasks.size(), cancelledTasks);
        assertEquals(completedTasks, hostsSniffer.runs.get());
        int setHostsRuns = hostsSniffer.runs.get() - hostsSniffer.failures.get() - hostsSniffer.emptyList.get();
        verify(restClient, times(setHostsRuns)).setHosts(Matchers.<HttpHost>anyVararg());
        verifyNoMoreInteractions(restClient);
    } finally {
        executor.shutdown();
        executor.awaitTermination(1000L, TimeUnit.MILLISECONDS);
    }
}
From source file:org.alfresco.repo.security.sync.TenantChainingUserRegistrySynchronizer.java
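A dedicated executor refreshes a job lock every LOCK_TTL/2 milliseconds; cleanup retries shutdown() with awaitTermination() a few times before falling back to shutdownNow().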
private void synchronizeInternal(boolean forceUpdate, boolean isFullSync, final boolean splitTxns) {
    TenantChainingUserRegistrySynchronizer.logger
            .debug("Running a sync for domain: " + SEIPTenantIntegration.getTenantId());
    if (TenantChainingUserRegistrySynchronizer.logger.isDebugEnabled()) {
        if (forceUpdate) {
            TenantChainingUserRegistrySynchronizer.logger.debug("Running a full sync.");
        } else {
            TenantChainingUserRegistrySynchronizer.logger.debug("Running a differential sync.");
        }
        if (allowDeletions) {
            TenantChainingUserRegistrySynchronizer.logger.debug("deletions are allowed");
        } else {
            TenantChainingUserRegistrySynchronizer.logger.debug("deletions are not allowed");
        }
        // Don't proceed with the sync if the repository is read only
        if (this.transactionService.isReadOnly()) {
            TenantChainingUserRegistrySynchronizer.logger
                    .warn("Unable to proceed with user registry synchronization. Repository is read only.");
            return;
        }
    }

    // Don't proceed with the sync if the repository is read only
    if (this.transactionService.isReadOnly()) {
        TenantChainingUserRegistrySynchronizer.logger
                .warn("Unable to proceed with user registry synchronization. Repository is read only.");
        return;
    }

    // Create a background executor that will refresh our lock. This means we can request a lock with a relatively
    // small persistence time and not worry about it lasting after server restarts. Note we use an independent
    // executor because this is a compound operation that spans across multiple batch processors.
    String lockToken = null;
    TraceableThreadFactory threadFactory = new TraceableThreadFactory();
    threadFactory.setNamePrefix("TenantChainingUserRegistrySynchronizer lock refresh");
    threadFactory.setThreadDaemon(true);
    ScheduledExecutorService lockRefresher = new ScheduledThreadPoolExecutor(1, threadFactory);

    // Let's ensure all exceptions get logged
    try {
        // First, try to obtain a lock to ensure we are the only node trying to run this job
        try {
            if (splitTxns) {
                // If this is an automated sync on startup or scheduled sync, don't even wait around for the lock.
                // Assume the sync will be completed on another node.
                lockToken = this.transactionService.getRetryingTransactionHelper()
                        .doInTransaction(new RetryingTransactionCallback<String>() {
                            public String execute() throws Throwable {
                                return TenantChainingUserRegistrySynchronizer.this.jobLockService.getLock(
                                        TenantChainingUserRegistrySynchronizer.LOCK_QNAME,
                                        TenantChainingUserRegistrySynchronizer.LOCK_TTL, 0, 1);
                            }
                        }, false, splitTxns);
            } else {
                // If this is a login-triggered sync, give it a few retries before giving up
                lockToken = this.jobLockService.getLock(TenantChainingUserRegistrySynchronizer.LOCK_QNAME,
                        TenantChainingUserRegistrySynchronizer.LOCK_TTL, 3000, 10);
            }
        } catch (LockAcquisitionException e) {
            // Don't proceed with the sync if it is running on another node
            TenantChainingUserRegistrySynchronizer.logger.warn(
                    "User registry synchronization already running in another thread. Synchronize aborted");
            return;
        }

        // Schedule the lock refresh to run at regular intervals
        final String token = lockToken;
        lockRefresher.scheduleAtFixedRate(new Runnable() {
            public void run() {
                TenantChainingUserRegistrySynchronizer.this.transactionService.getRetryingTransactionHelper()
                        .doInTransaction(new RetryingTransactionCallback<Object>() {
                            public Object execute() throws Throwable {
                                TenantChainingUserRegistrySynchronizer.this.jobLockService.refreshLock(token,
                                        TenantChainingUserRegistrySynchronizer.LOCK_QNAME,
                                        TenantChainingUserRegistrySynchronizer.LOCK_TTL);
                                return null;
                            }
                        }, false, splitTxns);
            }
        }, TenantChainingUserRegistrySynchronizer.LOCK_TTL / 2,
                TenantChainingUserRegistrySynchronizer.LOCK_TTL / 2, TimeUnit.MILLISECONDS);

        Set<String> visitedZoneIds = new TreeSet<String>();
        Collection<String> instanceIds = this.applicationContextManager.getInstanceIds();

        // Work out the set of all zone IDs in the authentication chain so that we can decide which users / groups
        // need 're-zoning'
        Set<String> allZoneIds = new TreeSet<String>();
        for (String id : instanceIds) {
            allZoneIds.add(AuthorityService.ZONE_AUTH_EXT_PREFIX + id);
        }

        for (String id : instanceIds) {
            ApplicationContext context = this.applicationContextManager.getApplicationContext(id);
            try {
                UserRegistry plugin = (UserRegistry) context.getBean(this.sourceBeanName);
                if (!(plugin instanceof ActivateableBean) || ((ActivateableBean) plugin).isActive()) {
                    if (TenantChainingUserRegistrySynchronizer.logger.isDebugEnabled()) {
                        mbeanServer = (MBeanServerConnection) getApplicationContext()
                                .getBean("alfrescoMBeanServer");
                        try {
                            StringBuilder nameBuff = new StringBuilder(200)
                                    .append("Alfresco:Type=Configuration,Category=Authentication,id1=managed,id2=")
                                    .append(URLDecoder.decode(id, "UTF-8"));
                            ObjectName name = new ObjectName(nameBuff.toString());
                            if (mbeanServer != null && mbeanServer.isRegistered(name)) {
                                MBeanInfo info = mbeanServer.getMBeanInfo(name);
                                MBeanAttributeInfo[] attributes = info.getAttributes();
                                TenantChainingUserRegistrySynchronizer.logger.debug(id + " attributes:");
                                for (MBeanAttributeInfo attribute : attributes) {
                                    Object value = mbeanServer.getAttribute(name, attribute.getName());
                                    TenantChainingUserRegistrySynchronizer.logger
                                            .debug(attribute.getName() + " = " + value);
                                }
                            }
                        } catch (UnsupportedEncodingException | MalformedObjectNameException
                                | InstanceNotFoundException | IntrospectionException
                                | AttributeNotFoundException | ReflectionException | MBeanException
                                | IOException e) {
                            if (TenantChainingUserRegistrySynchronizer.logger.isWarnEnabled()) {
                                TenantChainingUserRegistrySynchronizer.logger.warn("Exception during logging",
                                        e);
                            }
                        }
                    }
                    if (TenantChainingUserRegistrySynchronizer.logger.isInfoEnabled()) {
                        TenantChainingUserRegistrySynchronizer.logger
                                .info("Synchronizing users and groups with user registry '" + id + "'");
                    }
                    if (isFullSync && TenantChainingUserRegistrySynchronizer.logger.isWarnEnabled()) {
                        TenantChainingUserRegistrySynchronizer.logger
                                .warn("Full synchronization with user registry '" + id + "'");
                        if (allowDeletions) {
                            TenantChainingUserRegistrySynchronizer.logger.warn(
                                    "Some users and groups previously created by synchronization with this user registry may be removed.");
                        } else {
                            TenantChainingUserRegistrySynchronizer.logger.warn(
                                    "Deletions are disabled. Users and groups removed from this registry will be logged only and will remain in the repository. Users previously found in a different registry will be moved in the repository rather than recreated.");
                        }
                    }

                    // Work out whether we should do the work in a separate transaction (it's most performant if we
                    // bunch it into small transactions, but if we are doing a sync on login, it has to be the same
                    // transaction)
                    boolean requiresNew = splitTxns
                            || AlfrescoTransactionSupport.getTransactionReadState() == TxnReadState.TXN_READ_ONLY;

                    syncWithPlugin(id, plugin, forceUpdate, isFullSync, requiresNew, visitedZoneIds, allZoneIds);
                }
            } catch (NoSuchBeanDefinitionException e) {
                // Ignore and continue
            }
        }
    } catch (RuntimeException e) {
        TenantChainingUserRegistrySynchronizer.logger.error("Synchronization aborted due to error", e);
        throw e;
    }
    // Release the lock if necessary
    finally {
        if (lockToken != null) {
            // Cancel the lock refresher
            // Because we may hit a perfect storm when trying to interrupt workers in their unsynchronized getTask()
            // method we can't wait indefinitely and may have to retry the shutdown
            int tries = 0;
            do {
                lockRefresher.shutdown();
                try {
                    lockRefresher.awaitTermination(TenantChainingUserRegistrySynchronizer.LOCK_TTL,
                            TimeUnit.MILLISECONDS);
                } catch (InterruptedException e) {
                }
            } while (!lockRefresher.isTerminated() && tries++ < 3);
            if (!lockRefresher.isTerminated()) {
                lockRefresher.shutdownNow();
                TenantChainingUserRegistrySynchronizer.logger.error("Failed to shut down lock refresher");
            }

            final String token = lockToken;
            this.transactionService.getRetryingTransactionHelper()
                    .doInTransaction(new RetryingTransactionCallback<Object>() {
                        public Object execute() throws Throwable {
                            TenantChainingUserRegistrySynchronizer.this.jobLockService.releaseLock(token,
                                    TenantChainingUserRegistrySynchronizer.LOCK_QNAME);
                            return null;
                        }
                    }, false, splitTxns);
        }
    }
}
From source file:org.alfresco.repo.security.sync.ChainingUserRegistrySynchronizer.java
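The same lock-refresher pattern as the previous example: scheduleAtFixedRate() keeps the cluster lock alive during the sync, and the finally block retries shutdown() before forcing shutdownNow().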
private void synchronizeInternal(boolean forceUpdate, boolean isFullSync, final boolean splitTxns) {
    if (ChainingUserRegistrySynchronizer.logger.isDebugEnabled()) {
        if (forceUpdate) {
            ChainingUserRegistrySynchronizer.logger.debug("Running a full sync.");
        } else {
            ChainingUserRegistrySynchronizer.logger.debug("Running a differential sync.");
        }
        if (allowDeletions) {
            ChainingUserRegistrySynchronizer.logger.debug("deletions are allowed");
        } else {
            ChainingUserRegistrySynchronizer.logger.debug("deletions are not allowed");
        }
        // Don't proceed with the sync if the repository is read only
        if (this.transactionService.isReadOnly()) {
            ChainingUserRegistrySynchronizer.logger
                    .warn("Unable to proceed with user registry synchronization. Repository is read only.");
            return;
        }
    }

    // Don't proceed with the sync if the repository is read only
    if (this.transactionService.isReadOnly()) {
        ChainingUserRegistrySynchronizer.logger
                .warn("Unable to proceed with user registry synchronization. Repository is read only.");
        return;
    }

    // Create a background executor that will refresh our lock. This means we can request a lock with a relatively
    // small persistence time and not worry about it lasting after server restarts. Note we use an independent
    // executor because this is a compound operation that spans across multiple batch processors.
    String lockToken = null;
    TraceableThreadFactory threadFactory = new TraceableThreadFactory();
    threadFactory.setNamePrefix("ChainingUserRegistrySynchronizer lock refresh");
    threadFactory.setThreadDaemon(true);
    ScheduledExecutorService lockRefresher = new ScheduledThreadPoolExecutor(1, threadFactory);

    // Let's ensure all exceptions get logged
    try {
        // First, try to obtain a lock to ensure we are the only node trying to run this job
        try {
            if (splitTxns) {
                // If this is an automated sync on startup or scheduled sync, don't even wait around for the lock.
                // Assume the sync will be completed on another node.
                lockToken = this.transactionService.getRetryingTransactionHelper()
                        .doInTransaction(new RetryingTransactionCallback<String>() {
                            public String execute() throws Throwable {
                                return ChainingUserRegistrySynchronizer.this.jobLockService.getLock(
                                        ChainingUserRegistrySynchronizer.LOCK_QNAME,
                                        ChainingUserRegistrySynchronizer.LOCK_TTL, 0, 1);
                            }
                        }, false, splitTxns);
            } else {
                // If this is a login-triggered sync, give it a few retries before giving up
                lockToken = this.jobLockService.getLock(ChainingUserRegistrySynchronizer.LOCK_QNAME,
                        ChainingUserRegistrySynchronizer.LOCK_TTL, 3000, 10);
            }
        } catch (LockAcquisitionException e) {
            // Don't proceed with the sync if it is running on another node
            ChainingUserRegistrySynchronizer.logger.warn(
                    "User registry synchronization already running in another thread. Synchronize aborted");
            return;
        }

        // Schedule the lock refresh to run at regular intervals
        final String token = lockToken;
        lockRefresher.scheduleAtFixedRate(new Runnable() {
            public void run() {
                ChainingUserRegistrySynchronizer.this.transactionService.getRetryingTransactionHelper()
                        .doInTransaction(new RetryingTransactionCallback<Object>() {
                            public Object execute() throws Throwable {
                                ChainingUserRegistrySynchronizer.this.jobLockService.refreshLock(token,
                                        ChainingUserRegistrySynchronizer.LOCK_QNAME,
                                        ChainingUserRegistrySynchronizer.LOCK_TTL);
                                return null;
                            }
                        }, false, splitTxns);
            }
        }, ChainingUserRegistrySynchronizer.LOCK_TTL / 2, ChainingUserRegistrySynchronizer.LOCK_TTL / 2,
                TimeUnit.MILLISECONDS);

        Set<String> visitedZoneIds = new TreeSet<String>();
        Collection<String> instanceIds = this.applicationContextManager.getInstanceIds();

        // Work out the set of all zone IDs in the authentication chain so that we can decide which users / groups
        // need 're-zoning'
        Set<String> allZoneIds = new TreeSet<String>();
        for (String id : instanceIds) {
            allZoneIds.add(AuthorityService.ZONE_AUTH_EXT_PREFIX + id);
        }

        // Collect the plugins that we can sync : zoneId, plugin
        Map<String, UserRegistry> plugins = new HashMap<String, UserRegistry>();
        for (String id : instanceIds) {
            UserRegistry plugin;
            try {
                ApplicationContext context = this.applicationContextManager.getApplicationContext(id);
                plugin = (UserRegistry) context.getBean(this.sourceBeanName);
            } catch (RuntimeException e) {
                // The bean doesn't exist or this subsystem won't start. The reason would have been logged.
                // Ignore and continue.
                continue;
            }
            if (!(plugin instanceof ActivateableBean) || ((ActivateableBean) plugin).isActive()) {
                // yes this plugin needs to be synced
                plugins.put(id, plugin);
            }
        }

        /**
         * Sync starts here
         */
        notifySyncStart(plugins.keySet());

        for (String id : instanceIds) {
            UserRegistry plugin = plugins.get(id);
            if (plugin != null) {
                // If debug is enabled then dump out the contents of the authentication JMX bean
                if (ChainingUserRegistrySynchronizer.logger.isDebugEnabled()) {
                    mbeanServer = (MBeanServerConnection) getApplicationContext()
                            .getBean("alfrescoMBeanServer");
                    try {
                        StringBuilder nameBuff = new StringBuilder(200)
                                .append("Alfresco:Type=Configuration,Category=Authentication,id1=managed,id2=")
                                .append(URLDecoder.decode(id, "UTF-8"));
                        ObjectName name = new ObjectName(nameBuff.toString());
                        if (mbeanServer != null && mbeanServer.isRegistered(name)) {
                            MBeanInfo info = mbeanServer.getMBeanInfo(name);
                            MBeanAttributeInfo[] attributes = info.getAttributes();
                            ChainingUserRegistrySynchronizer.logger.debug(id + " attributes:");
                            for (MBeanAttributeInfo attribute : attributes) {
                                Object value = mbeanServer.getAttribute(name, attribute.getName());
                                ChainingUserRegistrySynchronizer.logger
                                        .debug(attribute.getName() + " = " + value);
                            }
                        }
                    } catch (UnsupportedEncodingException | MalformedObjectNameException
                            | InstanceNotFoundException | IntrospectionException | AttributeNotFoundException
                            | ReflectionException | MBeanException | IOException e) {
                        if (ChainingUserRegistrySynchronizer.logger.isWarnEnabled()) {
                            ChainingUserRegistrySynchronizer.logger.warn("Exception during logging", e);
                        }
                    }
                } // end of debug dump of active JMX bean

                if (ChainingUserRegistrySynchronizer.logger.isInfoEnabled()) {
                    ChainingUserRegistrySynchronizer.logger
                            .info("Synchronizing users and groups with user registry '" + id + "'");
                }
                if (isFullSync && ChainingUserRegistrySynchronizer.logger.isWarnEnabled()) {
                    ChainingUserRegistrySynchronizer.logger
                            .warn("Full synchronization with user registry '" + id + "'");
                    if (allowDeletions) {
                        ChainingUserRegistrySynchronizer.logger.warn(
                                "Some users and groups previously created by synchronization with this user registry may be removed.");
                    } else {
                        ChainingUserRegistrySynchronizer.logger.warn(
                                "Deletions are disabled. Users and groups removed from this registry will be logged only and will remain in the repository. Users previously found in a different registry will be moved in the repository rather than recreated.");
                    }
                }

                // Work out whether we should do the work in a separate transaction (it's most performant if we
                // bunch it into small transactions, but if we are doing a sync on login, it has to be the same
                // transaction)
                boolean requiresNew = splitTxns
                        || AlfrescoTransactionSupport.getTransactionReadState() == TxnReadState.TXN_READ_ONLY;

                try {
                    /**
                     * Do the sync with the specified plugin
                     */
                    syncWithPlugin(id, plugin, forceUpdate, isFullSync, requiresNew, visitedZoneIds, allZoneIds);
                    this.applicationEventPublisher.publishEvent(new SynchronizeDirectoryEndEvent(this, id));
                } catch (final RuntimeException e) {
                    notifySyncDirectoryEnd(id, e);
                    throw e;
                }
            } // if plugin exists
        } // for each instanceId

        // End of successful synchronization here
        notifySyncEnd();
    } catch (final RuntimeException e) {
        notifySyncEnd(e);
        ChainingUserRegistrySynchronizer.logger.error("Synchronization aborted due to error", e);
        throw e;
    } finally {
        // Release the lock if necessary
        if (lockToken != null) {
            // Cancel the lock refresher
            // Because we may hit a perfect storm when trying to interrupt workers in their unsynchronized getTask()
            // method we can't wait indefinitely and may have to retry the shutdown
            int tries = 0;
            do {
                lockRefresher.shutdown();
                try {
                    lockRefresher.awaitTermination(ChainingUserRegistrySynchronizer.LOCK_TTL,
                            TimeUnit.MILLISECONDS);
                } catch (InterruptedException e) {
                }
            } while (!lockRefresher.isTerminated() && tries++ < 3);
            if (!lockRefresher.isTerminated()) {
                lockRefresher.shutdownNow();
                ChainingUserRegistrySynchronizer.logger.error("Failed to shut down lock refresher");
            }

            final String token = lockToken;
            this.transactionService.getRetryingTransactionHelper()
                    .doInTransaction(new RetryingTransactionCallback<Object>() {
                        public Object execute() throws Throwable {
                            ChainingUserRegistrySynchronizer.this.jobLockService.releaseLock(token,
                                    ChainingUserRegistrySynchronizer.LOCK_QNAME);
                            return null;
                        }
                    }, false, splitTxns);
        }
    }
}