List of usage examples for java.util.Collections.synchronizedList
public static <T> List<T> synchronizedList(List<T> list)
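Before the harvested examples below, a minimal self-contained sketch of the basic contract (the class name and sample data are illustrative): each individual call on the returned wrapper is synchronized on the wrapper itself, but traversal spans multiple calls, so the Javadoc requires the caller to hold the list's lock for the duration of the iteration.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class SynchronizedListSketch {
    public static void main(String[] args) {
        // Wrap a plain ArrayList so individual operations (add, get, size, ...)
        // are serialized on the returned wrapper's monitor.
        List<String> names = Collections.synchronizedList(new ArrayList<String>());
        names.add("alpha");
        names.add("beta");

        // Iteration is NOT atomic: the caller must synchronize on the wrapper
        // itself while traversing, as the Javadoc requires.
        synchronized (names) {
            for (String name : names) {
                System.out.println(name);
            }
        }
    }
}

When reads vastly outnumber writes, CopyOnWriteArrayList is a common alternative that allows lock-free iteration; the wrapper above is the simpler choice when reads and writes are balanced, as in the exception-collecting and result-collecting patterns that recur in the examples below.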
From source file:org.apache.tinkerpop.gremlin.groovy.engine.GremlinExecutorTest.java
@Test
public void shouldAllowConcurrentModificationOfGlobals() throws Exception {
    // this test simulates a scenario that likely shouldn't happen - where globals are modified by multiple
    // threads. globals are created in a synchronized fashion typically but it's possible that someone
    // could do something like this and this test validates that concurrency exceptions don't occur as a
    // result
    final ExecutorService service = Executors.newFixedThreadPool(8, testingThreadFactory);
    final Bindings globals = new SimpleBindings();
    globals.put("g", -1);
    final GremlinExecutor gremlinExecutor = GremlinExecutor.build().globalBindings(globals).create();

    final AtomicBoolean failed = new AtomicBoolean(false);
    final int max = 512;
    final List<Pair<Integer, List<Integer>>> futures = Collections.synchronizedList(new ArrayList<>(max));
    IntStream.range(0, max).forEach(i -> {
        final int yValue = i * 2;
        final Bindings b = new SimpleBindings();
        b.put("x", i);
        b.put("y", yValue);
        final int zValue = i * -1;
        final String script = "z=" + zValue + ";[x,y,z,g]";
        try {
            service.submit(() -> {
                try {
                    // modify the global in a separate thread
                    gremlinExecutor.getGlobalBindings().put("g", i);
                    gremlinExecutor.getGlobalBindings().put(Integer.toString(i), i);
                    gremlinExecutor.getGlobalBindings().keySet().stream()
                            .filter(s -> i % 2 == 0 && !s.equals("g")).findFirst().ifPresent(globals::remove);
                    final List<Integer> result = (List<Integer>) gremlinExecutor.eval(script, b).get();
                    futures.add(Pair.with(i, result));
                } catch (Exception ex) {
                    failed.set(true);
                }
            });
        } catch (Exception ex) {
            throw new RuntimeException(ex);
        }
    });

    service.shutdown();
    assertThat(service.awaitTermination(60000, TimeUnit.MILLISECONDS), is(true));

    // likely a concurrency exception if it occurs - and if it does then we've messed up because that's what this
    // test is partially designed to protect against.
    assertThat(failed.get(), is(false));

    assertEquals(max, futures.size());
    futures.forEach(t -> {
        assertEquals(t.getValue0(), t.getValue1().get(0));
        assertEquals(t.getValue0() * 2, t.getValue1().get(1).intValue());
        assertEquals(t.getValue0() * -1, t.getValue1().get(2).intValue());
        assertThat(t.getValue1().get(3).intValue(), greaterThan(-1));
    });
}
From source file:org.pentaho.platform.engine.core.system.PentahoSystem.java
public static void addInitializationFailureMessage(final int failureBit, final String message) {
    Integer i = new Integer(failureBit);
    List l = (List) PentahoSystem.initializationFailureDetailsMap.get(i);
    if (l == null) {
        l = Collections.synchronizedList(new ArrayList());
        PentahoSystem.initializationFailureDetailsMap.put(i, l);
    }
    final String msg = " " + message; //$NON-NLS-1$
    if (!l.contains(msg)) {
        l.add(msg);
    }
}
From source file:net.schweerelos.parrot.ui.GraphViewComponent.java
private void fireNodeSelected(NodeWrapper newSelection) {
    List<PickListener> listeners;
    synchronized (this) {
        listeners = Collections.synchronizedList(pickListeners);
    }
    synchronized (listeners) {
        for (PickListener listener : listeners) {
            try {
                listener.picked(newSelection);
            } catch (RuntimeException re) {
                re.printStackTrace();
                pickListeners.remove(listener);
            }
        }
    }
}
From source file:org.olat.commons.calendar.ICalFileCalendarManagerTest.java
/**
 * Test concurrent add/delete event with two threads and code-point to control concurrency.
 */
@Test
public void testConcurrentAddRemoveEvent() {
    final String TEST_EVENT_ID_1 = "id-testConcurrentAddRemoveEvent-1";
    final String TEST_EVENT_SUBJECT_1 = "testEvent1";
    final String TEST_EVENT_ID_2 = "id-testConcurrentAddRemoveEvent-2";
    final String TEST_EVENT_SUBJECT_2 = "testEvent2";

    final Identity test = JunitTestHelper.createAndPersistIdentityAsRndUser("ical-1-");
    final List<Exception> exceptionHolder = Collections.synchronizedList(new ArrayList<Exception>(1));
    final List<Boolean> statusList = Collections.synchronizedList(new ArrayList<Boolean>(1));

    // Generate event for update
    CalendarManager calManager = CalendarManagerFactory.getInstance().getCalendarManager();
    Kalendar cal = calManager.getPersonalCalendar(test).getKalendar();
    calManager.addEventTo(cal, new KalendarEvent(TEST_EVENT_ID_2, TEST_EVENT_SUBJECT_2, new Date(), 1));
    cal = calManager.getPersonalCalendar(test).getKalendar();
    KalendarEvent event2 = cal.getEvent(TEST_EVENT_ID_2);
    assertNotNull("Did not find event with id=" + TEST_EVENT_ID_2, event2);
    assertEquals("Wrong calendar-event subject", event2.getSubject(), TEST_EVENT_SUBJECT_2);
    log.info("testConcurrentAddRemoveEvent thread2 addEvent2 DONE");

    final CountDownLatch doneSignal = new CountDownLatch(2);

    // thread 1
    Thread thread1 = new Thread() {
        public void run() {
            try {
                // 1. load calendar
                CalendarManager calendarManager = CalendarManagerFactory.getInstance().getCalendarManager();
                Kalendar calendar = calendarManager.getPersonalCalendar(test).getKalendar();
                // 2. add Event1 => breakpoint hit
                log.info("testConcurrentAddRemoveEvent thread1 addEvent1");
                calendarManager.addEventTo(calendar,
                        new KalendarEvent(TEST_EVENT_ID_1, TEST_EVENT_SUBJECT_1, new Date(), 1));
                log.info("testConcurrentAddRemoveEvent thread1 addEvent1 DONE");
                // 3. check event1 exists
                calendar = calendarManager.getPersonalCalendar(test).getKalendar();
                KalendarEvent event1 = calendar.getEvent(TEST_EVENT_ID_1);
                assertNotNull("Did not find event with id=" + TEST_EVENT_ID_1, event1);
                assertEquals("Wrong calendar-event subject", event1.getSubject(), TEST_EVENT_SUBJECT_1);
                // 4. sleep 2sec
                // 5. check event1 still exists (event2 removed in the meantime)
                calendar = calendarManager.getPersonalCalendar(test).getKalendar();
                event1 = calendar.getEvent(TEST_EVENT_ID_1);
                assertNotNull("Did not find event with id=" + TEST_EVENT_ID_1, event1);
                assertEquals("Wrong calendar-event subject", event1.getSubject(), TEST_EVENT_SUBJECT_1);
                statusList.add(Boolean.TRUE);
                log.info("testConcurrentAddRemoveEvent thread1 finished");
            } catch (Exception ex) {
                exceptionHolder.add(ex); // no exception should happen
            } finally {
                doneSignal.countDown();
                DBFactory.getInstance().commitAndCloseSession();
            }
        }
    };

    // thread 2
    Thread thread2 = new Thread() {
        public void run() {
            try {
                CalendarManager calendarManager = CalendarManagerFactory.getInstance().getCalendarManager();
                Kalendar calendar = calendarManager.getPersonalCalendar(test).getKalendar();
                // 3. remove Event2 (breakpoint of thread1 blocks)
                log.info("testConcurrentAddRemoveEvent thread2 removeEvent2");
                boolean removed = calendarManager.removeEventFrom(calendar,
                        new KalendarEvent(TEST_EVENT_ID_2, TEST_EVENT_SUBJECT_2, new Date(), 1));
                assertTrue(removed);
                log.info("testConcurrentAddRemoveEvent thread2 removeEvent2 DONE");
                // 4. check event2 no longer exists
                calendar = calendarManager.getPersonalCalendar(test).getKalendar();
                KalendarEvent updatedEvent = calendar.getEvent(TEST_EVENT_ID_2);
                assertNull("Still found deleted event with id=" + TEST_EVENT_ID_2, updatedEvent);
                // 5. check event1 exists
                calendar = calendarManager.getPersonalCalendar(test).getKalendar();
                KalendarEvent event1 = calendar.getEvent(TEST_EVENT_ID_1);
                assertNotNull("Did not find event with id=" + TEST_EVENT_ID_1, event1);
                assertEquals("Wrong calendar-event subject", event1.getSubject(), TEST_EVENT_SUBJECT_1);
                statusList.add(Boolean.TRUE);
                log.info("testConcurrentAddRemoveEvent thread2 finished");
            } catch (Exception ex) {
                exceptionHolder.add(ex); // no exception should happen
            } finally {
                doneSignal.countDown();
                DBFactory.getInstance().commitAndCloseSession();
            }
        }
    };

    thread1.start();
    thread2.start();

    try {
        boolean finished = doneSignal.await(10, TimeUnit.SECONDS);
        assertTrue("Test takes too long (more than 10s)", finished);
    } catch (InterruptedException e) {
        fail("" + e.getMessage());
    }

    // if not -> they are in deadlock and the db did not detect it
    for (Exception exception : exceptionHolder) {
        log.info("exception: " + exception.getMessage());
        exception.printStackTrace();
    }
    assertTrue("It throws an exception in test => see sysout", exceptionHolder.isEmpty());
    log.info("testConcurrentAddRemoveEvent finish successful");
}
From source file:com.cloudant.sync.indexing.IndexManagerIndexTest.java
/**
 * A sanity-check that updating the datastore from many threads
 * doesn't cause the index manager to balk.
 */
@Test
public void index_UpdateCrudMultiThreaded()
        throws IndexExistsException, SQLException, ConflictException, InterruptedException {
    int n_threads = 3;
    final int n_docs = 100;

    // We'll later search for search == success
    final Map<String, String> matching = ImmutableMap.of("search", "success");
    final Map<String, String> nonmatching = ImmutableMap.of("search", "failure");
    indexManager.ensureIndexed("search", "search", IndexType.STRING);

    final List<String> matching_ids = Collections.synchronizedList(new ArrayList<String>());

    // When run, this thread creates n_docs documents with unique
    // names in the datastore. A subset of these
    // will be matched by our query to the datastore later, which
    // we record in the matching_ids list.
    class PopulateThread extends Thread {

        @Override
        public void run() {
            String docId;
            final String thread_id;
            DocumentBody body;

            thread_id = Thread.currentThread().getName();

            for (int i = 0; i < n_docs; i++) {
                docId = String.format("%s-%s", thread_id, i);
                if ((i % 2) == 0) { // even numbers create matching docs
                    body = DocumentBodyFactory.create(matching);
                    matching_ids.add(docId);
                } else {
                    body = DocumentBodyFactory.create(nonmatching);
                }
                datastore.createDocument(docId, body);
            }

            // we're not on the main thread, so we must close our own connection
            datastore.getSQLDatabase().close();
        }
    }

    List<Thread> threads = new ArrayList<Thread>();

    // Create, start and wait for the threads to complete
    for (int i = 0; i < n_threads; i++) {
        threads.add(new PopulateThread());
    }
    for (Thread t : threads) {
        t.start();
    }
    for (Thread t : threads) {
        t.join();
    }

    // Check appropriate entries in index
    QueryBuilder q = new QueryBuilder();
    q.index("search").equalTo("success");
    QueryResult result = indexManager.query(q.build());
    List<DocumentRevision> docRevisions = Lists.newArrayList(result);
    List<String> docIds = new ArrayList<String>();
    for (DocumentRevision r : docRevisions) {
        docIds.add(r.getId());
    }

    Assert.assertEquals(matching_ids.size(), docIds.size());
    for (String id : matching_ids) {
        Assert.assertTrue(docIds.contains(id));
    }
}
From source file:org.springframework.batch.core.jsr.launch.JsrJobOperator.java
/**
 * Creates a child {@link ApplicationContext} for the job being requested based upon
 * the /META-INF/batch.xml (if exists) and the /META-INF/batch-jobs/<jobName>.xml
 * configuration and launches the job. Per JSR-352, calls to this method will always
 * create a new {@link JobInstance} (and related {@link JobExecution}).
 *
 * @param jobName the name of the job XML file without the .xml that is located within the
 * /META-INF/batch-jobs directory.
 * @param params any job parameters to be used during the execution of this job.
 */
@Override
public long start(String jobName, Properties params) throws JobStartException, JobSecurityException {
    final JsrXmlApplicationContext batchContext = new JsrXmlApplicationContext(params);
    batchContext.setValidating(false);

    Resource batchXml = new ClassPathResource("/META-INF/batch.xml");
    String jobConfigurationLocation = "/META-INF/batch-jobs/" + jobName + ".xml";
    Resource jobXml = new ClassPathResource(jobConfigurationLocation);

    if (batchXml.exists()) {
        batchContext.load(batchXml);
    }

    if (jobXml.exists()) {
        batchContext.load(jobXml);
    }

    AbstractBeanDefinition beanDefinition = BeanDefinitionBuilder
            .genericBeanDefinition("org.springframework.batch.core.jsr.JsrJobContextFactoryBean")
            .getBeanDefinition();
    beanDefinition.setScope(BeanDefinition.SCOPE_SINGLETON);
    batchContext.registerBeanDefinition(JSR_JOB_CONTEXT_BEAN_NAME, beanDefinition);

    if (baseContext != null) {
        batchContext.setParent(baseContext);
    } else {
        batchContext.getBeanFactory().registerSingleton("jobExplorer", jobExplorer);
        batchContext.getBeanFactory().registerSingleton("jobRepository", jobRepository);
        batchContext.getBeanFactory().registerSingleton("jobParametersConverter", jobParametersConverter);
        batchContext.getBeanFactory().registerSingleton("transactionManager", transactionManager);
    }

    try {
        batchContext.refresh();
    } catch (BeanCreationException e) {
        throw new JobStartException(e);
    }

    Assert.notNull(jobName, "The job name must not be null.");

    final org.springframework.batch.core.JobExecution jobExecution;

    try {
        JobParameters jobParameters = jobParametersConverter.getJobParameters(params);
        String[] jobNames = batchContext.getBeanNamesForType(Job.class);

        if (jobNames == null || jobNames.length <= 0) {
            throw new BatchRuntimeException("No Job defined in current context");
        }

        org.springframework.batch.core.JobInstance jobInstance = jobRepository.createJobInstance(jobNames[0],
                jobParameters);
        jobExecution = jobRepository.createJobExecution(jobInstance, jobParameters, jobConfigurationLocation);
    } catch (Exception e) {
        throw new JobStartException(e);
    }

    try {
        final Semaphore semaphore = new Semaphore(1);
        final List<Exception> exceptionHolder = Collections.synchronizedList(new ArrayList<Exception>());
        semaphore.acquire();

        taskExecutor.execute(new Runnable() {

            @Override
            public void run() {
                JsrJobContextFactoryBean factoryBean = null;
                try {
                    factoryBean = (JsrJobContextFactoryBean) batchContext
                            .getBean("&" + JSR_JOB_CONTEXT_BEAN_NAME);
                    factoryBean.setJobExecution(jobExecution);
                    final Job job = batchContext.getBean(Job.class);
                    semaphore.release();
                    // Initialization of the JobExecution for job level dependencies
                    jobRegistry.register(job, jobExecution);
                    job.execute(jobExecution);
                    jobRegistry.remove(jobExecution);
                } catch (Exception e) {
                    exceptionHolder.add(e);
                } finally {
                    if (factoryBean != null) {
                        factoryBean.close();
                    }
                    batchContext.close();
                    if (semaphore.availablePermits() == 0) {
                        semaphore.release();
                    }
                }
            }
        });

        semaphore.acquire();
        if (exceptionHolder.size() > 0) {
            semaphore.release();
            throw new JobStartException(exceptionHolder.get(0));
        }
    } catch (Exception e) {
        if (jobRegistry.exists(jobExecution.getId())) {
            jobRegistry.remove(jobExecution);
        }
        jobExecution.upgradeStatus(BatchStatus.FAILED);
        if (jobExecution.getExitStatus().equals(ExitStatus.UNKNOWN)) {
            jobExecution.setExitStatus(ExitStatus.FAILED.addExitDescription(e));
        }
        jobRepository.update(jobExecution);

        if (batchContext.isActive()) {
            batchContext.close();
        }

        throw new JobStartException(e);
    }
    return jobExecution.getId();
}
From source file:com.netcrest.pado.tools.pado.PadoShell.java
/**
 * Executes the specified PadoShell command string.
 *
 * @param commandString
 *            Command string
 * @param isResetTimer
 *            true to reset the timer.
 * @return true if the specified command is successfully executed, false
 *         otherwise.
 */
public boolean runCommand(String commandString, boolean isResetTimer) {
    String commandName = null;
    if (commandString != null) {
        commandString = commandString.trim();
        commandString = expandEnvs(commandString);
        if (isEcho()) {
            println(commandString);
        }
        String[] split = commandString.split(" ");
        commandName = split[0];
        if (commandName.endsWith("&")) {
            commandName = commandName.substring(0, commandName.length() - 1);
        }
    }
    // Add quit/exit command to history before executing it.
    // if (commandString == null /* EOF */ || commandString.endsWith("exit") || commandString.endsWith("quit")) {
    //     addCmdHistory(commandString);
    // }
    try {
        ICommand command = getCommand(commandName);
        if (command != null) {
            // CommandLine commandLine = cliParseCommandLine(command.getOptions(), commandString);
            // if (commandLine.hasOption('?')) {
            //     command.help();
            // } else {
            //     if (command.isLoginRequired() && SharedCache.getSharedCache().isLoggedIn() == false) {
            //         printlnError(command, "Not logged in. Command aborted.");
            //         return false;
            //     }
            //     startTimer();
            //     command.run(commandLine, commandString);
            //     stopTimer();
            //     if (!commandName.equals("r")) {
            //         addCmdHistory(commandString);
            //     }
            // }
        } else {
            // Some command name exceptions...
            if (commandString.matches("\\(.*select.*")) {
                command = getCommand("select");
            } else if (commandString.startsWith("?")) {
                command = getCommand("help");
            }
        }

        if (command == null) {
            // the specified command is not supported
            if (commandString.length() != 0) {
                printlnError("Unrecognized command. Enter Tab, 'help' or '?' to get a list of commands.");
            }
            return false;
        } else {
            boolean isBackground = commandString.endsWith("&");
            CommandLine commandLine;
            if (isBackground) {
                commandLine = cliParseCommandLine(command.getOptions(),
                        commandString.substring(0, commandString.length() - 1));
            } else {
                commandLine = cliParseCommandLine(command.getOptions(), commandString);
            }
            if (commandLine.hasOption('?')) {
                command.help();
            } else {
                if (command.isLoginRequired() && SharedCache.getSharedCache().isLoggedIn() == false) {
                    printlnError(command, "Not logged in. Command aborted.");
                    return false;
                }
                if (isBackground) {
                    if (backgroundThreadPool == null) {
                        backgroundThreadPool = Executors.newCachedThreadPool();
                        backgroundCommandRunnerList = Collections
                                .synchronizedList(new ArrayList<BackgroundCommandRunner>(10));
                    }
                    int jobNumber = 1;
                    if (backgroundCommandRunnerList.size() > 0) {
                        BackgroundCommandRunner runner = backgroundCommandRunnerList
                                .get(backgroundCommandRunnerList.size() - 1);
                        jobNumber = runner.jobNumber + 1;
                    }
                    BackgroundCommandRunner runner = new BackgroundCommandRunner(jobNumber, command,
                            commandLine, commandString);
                    backgroundCommandRunnerList.add(runner);
                    backgroundThreadPool.execute(runner);
                } else {
                    if (isResetTimer) {
                        startTimer();
                    }
                    command.run(commandLine, commandString);
                    if (isResetTimer) {
                        stopTimer();
                    }
                }
                if (command instanceof logout) {
                    reset();
                }
            }
        }
    } catch (Exception ex) {
        printlnError(getCauseMessage(ex));
        if (DEBUG_ENABLED) {
            ex.printStackTrace();
        }
        return false;
    }
    return true;
}
From source file:voldemort.store.routed.RoutedStore.java
public void put(final ByteArray key, final Versioned<byte[]> versioned) throws VoldemortException {
    long startNs = System.nanoTime();
    StoreUtils.assertValidKey(key);
    final List<Node> nodes = availableNodes(routingStrategy.routeRequest(key.get()));

    // quickly fail if there aren't enough nodes to meet the requirement
    final int numNodes = nodes.size();
    if (numNodes < this.storeDef.getRequiredWrites())
        throw new InsufficientOperationalNodesException("Only " + numNodes + " nodes in preference list, but "
                + this.storeDef.getRequiredWrites() + " writes required.");

    // A count of the number of successful operations
    final AtomicInteger successes = new AtomicInteger(0);

    // A list of thrown exceptions, indicating the number of failures
    final List<Exception> failures = Collections.synchronizedList(new ArrayList<Exception>(1));

    // If requiredWrites > 0 then do a single blocking write to the first
    // live node in the preference list if this node throws an
    // ObsoleteVersionException allow it to propagate
    Node master = null;
    int currentNode = 0;
    Versioned<byte[]> versionedCopy = null;
    for (; currentNode < numNodes; currentNode++) {
        Node current = nodes.get(currentNode);
        long startNsLocal = System.nanoTime();
        try {
            versionedCopy = incremented(versioned, current.getId());
            innerStores.get(current.getId()).put(key, versionedCopy);
            successes.getAndIncrement();
            recordSuccess(current, startNsLocal);
            master = current;
            break;
        } catch (UnreachableStoreException e) {
            recordException(current, startNsLocal, e);
            failures.add(e);
        } catch (VoldemortApplicationException e) {
            throw e;
        } catch (Exception e) {
            failures.add(e);
        }
    }

    if (successes.get() < 1)
        throw new InsufficientOperationalNodesException("No master node succeeded!",
                failures.size() > 0 ? failures.get(0) : null);
    else
        currentNode++;

    // A semaphore indicating the number of completed operations
    // Once initialized all permits are acquired, after that
    // permits are released when an operation is completed.
    // semaphore.acquire(n) waits for n operations to complete
    final Versioned<byte[]> finalVersionedCopy = versionedCopy;
    final Semaphore semaphore = new Semaphore(0, false);

    // Add the operations to the pool
    int attempts = 0;
    for (; currentNode < numNodes; currentNode++) {
        attempts++;
        final Node node = nodes.get(currentNode);
        this.executor.execute(new Runnable() {

            public void run() {
                long startNsLocal = System.nanoTime();
                try {
                    innerStores.get(node.getId()).put(key, finalVersionedCopy);
                    successes.incrementAndGet();
                    recordSuccess(node, startNsLocal);
                } catch (UnreachableStoreException e) {
                    recordException(node, startNsLocal, e);
                    failures.add(e);
                } catch (ObsoleteVersionException e) {
                    // ignore this completely here
                    // this means that a higher version was able
                    // to write on this node and should be termed as clean
                    // success.
                } catch (VoldemortApplicationException e) {
                    throw e;
                } catch (Exception e) {
                    logger.warn("Error in PUT on node " + node.getId() + "(" + node.getHost() + ")", e);
                    failures.add(e);
                } finally {
                    // signal that the operation is complete
                    semaphore.release();
                }
            }
        });
    }

    // Block until we get enough completions
    int blockCount = Math.min(storeDef.getPreferredWrites() - 1, attempts);
    boolean noTimeout = blockOnPut(startNs, semaphore, 0, blockCount, successes, storeDef.getPreferredWrites());

    if (successes.get() < storeDef.getRequiredWrites()) {
        /*
         * We don't have enough required writes, but we haven't timed out
         * yet, so block a little more if there are healthy nodes that can
         * help us achieve our target.
         */
        if (noTimeout) {
            int startingIndex = blockCount - 1;
            blockCount = Math.max(storeDef.getPreferredWrites() - 1, attempts);
            blockOnPut(startNs, semaphore, startingIndex, blockCount, successes, storeDef.getRequiredWrites());
        }
        if (successes.get() < storeDef.getRequiredWrites())
            throw new InsufficientOperationalNodesException(successes.get() + " writes succeeded, but "
                    + this.storeDef.getRequiredWrites() + " are required.", failures);
    }

    // Okay looks like it worked, increment the version for the caller
    VectorClock versionedClock = (VectorClock) versioned.getVersion();
    versionedClock.incrementVersion(master.getId(), time.getMilliseconds());
}
From source file:voldemort.client.rebalance.AbstractNonZonedRebalanceTest.java
@Test(timeout = 600000)
public void testProxyGetDuringRebalancing() throws Exception {
    logger.info("Starting testProxyGetDuringRebalancing");
    try {
        final Cluster currentCluster = ServerTestUtils.getLocalCluster(2,
                new int[][] { { 0, 1, 2, 3, 4, 5, 6 }, { 7, 8 } });
        final Cluster finalCluster = UpdateClusterUtils.createUpdatedCluster(currentCluster, 1,
                Lists.newArrayList(2, 3));

        // start servers 0 , 1 only
        final List<Integer> serverList = Arrays.asList(0, 1);
        Map<String, String> configProps = new HashMap<String, String>();
        configProps.put("admin.max.threads", "5");
        final Cluster updatedCurrentCluster = startServers(currentCluster, storeDefFileWithReplication,
                serverList, configProps);

        ExecutorService executors = Executors.newFixedThreadPool(2);
        final AtomicBoolean rebalancingComplete = new AtomicBoolean(false);
        final List<Exception> exceptions = Collections.synchronizedList(new ArrayList<Exception>());

        String bootstrapUrl = getBootstrapUrl(currentCluster, 0);
        int maxParallel = 2;
        final ClusterTestUtils.RebalanceKit rebalanceKit = ClusterTestUtils.getRebalanceKit(bootstrapUrl,
                maxParallel, finalCluster);

        // Populate the two stores
        populateData(updatedCurrentCluster, roStoreDefWithReplication,
                rebalanceKit.controller.getAdminClient(), true);
        populateData(updatedCurrentCluster, rwStoreDefWithReplication,
                rebalanceKit.controller.getAdminClient(), false);

        final SocketStoreClientFactory factory = new SocketStoreClientFactory(
                new ClientConfig().setBootstrapUrls(getBootstrapUrl(updatedCurrentCluster, 0))
                        .setEnableLazy(false).setSocketTimeout(120, TimeUnit.SECONDS));

        final StoreClient<String, String> storeClientRW = new DefaultStoreClient<String, String>(
                testStoreNameRW, null, factory, 3);
        final StoreClient<String, String> storeClientRO = new DefaultStoreClient<String, String>(
                testStoreNameRO, null, factory, 3);

        final CountDownLatch latch = new CountDownLatch(2);

        // start get operation.
        executors.execute(new Runnable() {

            @Override
            public void run() {
                try {
                    List<String> keys = new ArrayList<String>(testEntries.keySet());
                    while (!rebalancingComplete.get()) {
                        // should always be able to get values.
                        int index = (int) (Math.random() * keys.size());
                        // should get a valid value
                        try {
                            Versioned<String> value = storeClientRW.get(keys.get(index));
                            assertNotSame("StoreClient get() should not return null.", null, value);
                            assertEquals("Value returned should be good",
                                    new Versioned<String>(testEntries.get(keys.get(index))), value);

                            value = storeClientRO.get(keys.get(index));
                            assertNotSame("StoreClient get() should not return null.", null, value);
                            assertEquals("Value returned should be good",
                                    new Versioned<String>(testEntries.get(keys.get(index))), value);
                        } catch (Exception e) {
                            logger.error("Exception in online thread", e);
                            exceptions.add(e);
                        } finally {
                            latch.countDown();
                        }
                    }
                } catch (Exception e) {
                    logger.error("Exception in proxy get thread", e);
                    exceptions.add(e);
                } finally {
                    factory.close();
                }
            }
        });

        executors.execute(new Runnable() {

            @Override
            public void run() {
                try {
                    Thread.sleep(500);
                    rebalanceAndCheck(rebalanceKit.plan, rebalanceKit.controller, Arrays.asList(0, 1));
                    Thread.sleep(500);
                    rebalancingComplete.set(true);
                    checkConsistentMetadata(finalCluster, serverList);
                } catch (Exception e) {
                    exceptions.add(e);
                    logger.error("Exception in rebalancing thread", e);
                } finally {
                    // stop servers
                    try {
                        stopServer(serverList);
                    } catch (Exception e) {
                        throw new RuntimeException(e);
                    }
                    latch.countDown();
                }
            }
        });

        latch.await();
        executors.shutdown();
        executors.awaitTermination(300, TimeUnit.SECONDS);

        // check No Exception
        if (exceptions.size() > 0) {
            for (Exception e : exceptions) {
                e.printStackTrace();
            }
            fail("Should not see any exceptions.");
        }
    } catch (AssertionError ae) {
        logger.error("Assertion broken in testProxyGetDuringRebalancing ", ae);
        throw ae;
    }
}
From source file:com.gemini.provision.network.openstack.NetworkProviderOpenStackImpl.java
@Override
public List<ProvisioningProviderResponseType> bulkCreateSubnet(GeminiTenant tenant, GeminiEnvironment env,
        GeminiNetwork parent, List<GeminiSubnet> subnets) {
    List<ProvisioningProviderResponseType> retValues = Collections.synchronizedList(new ArrayList<>());
    // collect one response per subnet in encounter order; calling set(0, ...) on the
    // freshly created (empty) list would throw IndexOutOfBoundsException
    subnets.stream().forEach(n -> retValues.add(createSubnet(tenant, env, parent, n)));
    return retValues;
}