List of usage examples for java.util.Collections.synchronizedList
public static <T> List<T> synchronizedList(List<T> list)
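The method wraps the given list in a view whose individual operations (add, get, size, and so on) are synchronized on the returned object. Per the Javadoc, iteration over the returned list is not atomic, so the caller must synchronize on the wrapper manually while iterating. Before the real-world examples below, here is a minimal sketch of both points; the class and variable names are illustrative only:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class SynchronizedListSketch {
    public static void main(String[] args) throws InterruptedException {
        // Wrap a plain ArrayList; each individual call is now synchronized
        // on the returned wrapper.
        final List<Integer> results = Collections.synchronizedList(new ArrayList<>());

        // Typical use, as in the examples below: several worker threads
        // append results concurrently.
        Thread[] workers = new Thread[4];
        for (int t = 0; t < workers.length; t++) {
            final int id = t;
            workers[t] = new Thread(() -> results.add(id));
            workers[t].start();
        }
        for (Thread w : workers) {
            w.join();
        }

        // Iteration is NOT atomic: the Javadoc requires synchronizing on the
        // wrapper itself while iterating, otherwise a concurrent writer can
        // cause a ConcurrentModificationException.
        synchronized (results) {
            for (Integer r : results) {
                System.out.println(r);
            }
        }
    }
}

Most of the examples below follow the same pattern: worker threads append to the synchronized list, and the list is only read back after the workers have been joined or the executor has terminated.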
From source file: org.apache.tinkerpop.gremlin.groovy.jsr223.GremlinGroovyScriptEngineTest.java
@Test
public void shouldAllowVariableReuseAcrossThreads() throws Exception {
    final BasicThreadFactory testingThreadFactory = new BasicThreadFactory.Builder()
            .namingPattern("test-gremlin-scriptengine-%d").build();
    final ExecutorService service = Executors.newFixedThreadPool(8, testingThreadFactory);
    final GremlinGroovyScriptEngine scriptEngine = new GremlinGroovyScriptEngine();
    final AtomicBoolean failed = new AtomicBoolean(false);
    final int max = 512;
    final List<Pair<Integer, List<Integer>>> futures = Collections.synchronizedList(new ArrayList<>(max));
    IntStream.range(0, max).forEach(i -> {
        final int yValue = i * 2;
        final int zValue = i * -1;
        final Bindings b = new SimpleBindings();
        b.put("x", i);
        b.put("y", yValue);
        final String script = "z=" + zValue + ";[x,y,z]";
        try {
            service.submit(() -> {
                try {
                    final List<Integer> result = (List<Integer>) scriptEngine.eval(script, b);
                    futures.add(Pair.with(i, result));
                } catch (Exception ex) {
                    failed.set(true);
                }
            });
        } catch (Exception ex) {
            throw new RuntimeException(ex);
        }
    });

    service.shutdown();
    assertThat(service.awaitTermination(120000, TimeUnit.MILLISECONDS), is(true));

    // likely a concurrency exception if it occurs - and if it does then we've messed up because that's what this
    // test is partially designed to protect against.
    assertThat(failed.get(), is(false));

    assertEquals(max, futures.size());
    futures.forEach(t -> {
        assertEquals(t.getValue0(), t.getValue1().get(0));
        assertEquals(t.getValue0() * 2, t.getValue1().get(1).intValue());
        assertEquals(t.getValue0() * -1, t.getValue1().get(2).intValue());
    });
}
From source file: com.gargoylesoftware.htmlunit.WebClientWaitForBackgroundJobsTest.java
/**
 * {@link WebClient#waitForBackgroundJavaScript(long)} should have an overview of all windows.
 * @throws Exception if the test fails
 */
@Test
public void jobSchedulesJobInOtherWindow1() throws Exception {
    final String html = "<html>\n"
        + "<head>\n"
        + "  <script>\n"
        + "    var counter = 0;\n"
        + "    function test() {\n"
        + "      var w = window.open('about:blank');\n"
        + "      w.setTimeout(doWork1, 200);\n"
        + "    }\n"
        + "    function doWork1() {\n"
        + "      alert('work1');\n"
        + "      setTimeout(doWork2, 400);\n"
        + "    }\n"
        + "    function doWork2() {\n"
        + "      alert('work2');\n"
        + "    }\n"
        + "  </script>\n"
        + "</head>\n"
        + "<body onload='test()'>\n"
        + "</body>\n"
        + "</html>";

    final List<String> collectedAlerts = Collections.synchronizedList(new ArrayList<String>());
    final HtmlPage page = loadPage(html, collectedAlerts);

    startTimedTest();
    assertEquals(0, page.getWebClient().waitForBackgroundJavaScript(1000));
    assertMaxTestRunTime(1000);

    final String[] expectedAlerts = {"work1", "work2"};
    assertEquals(expectedAlerts, collectedAlerts);
}
From source file: org.lightjason.agentspeak.action.builtin.TestCActionMathStatistics.java
/**
 * test linear selection
 */
@Test
public final void linearselection() {
    final List<ITerm> l_return = Collections.synchronizedList(new ArrayList<>());

    IntStream.range(0, 6500)
        .parallel()
        .forEach(i -> new CLinearSelection().execute(
            false,
            IContext.EMPTYPLAN,
            Stream.of(
                Stream.of("c", "d").collect(Collectors.toList()),
                Stream.of(3, 7).collect(Collectors.toList())
            ).map(CRawTerm::from).collect(Collectors.toList()),
            l_return));

    Assert.assertEquals(
        (double) Collections.frequency(l_return.stream().map(ITerm::raw).collect(Collectors.toList()), "c")
            / l_return.size(),
        0.3, 0.05);
    Assert.assertEquals(
        (double) Collections.frequency(l_return.stream().map(ITerm::raw).collect(Collectors.toList()), "d")
            / l_return.size(),
        0.7, 0.05);
}
From source file: org.apache.hadoop.hdfs.server.namenode.FSImage.java
void doUpgrade(FSNamesystem target) throws IOException {
    checkUpgrade();

    // load the latest image
    this.loadFSImage(target, StartupOption.UPGRADE, null);

    // Do upgrade for each directory
    target.checkRollingUpgrade("upgrade namenode");

    long oldCTime = storage.getCTime();
    storage.cTime = now(); // generate new cTime for the state
    int oldLV = storage.getLayoutVersion();
    storage.layoutVersion = HdfsServerConstants.NAMENODE_LAYOUT_VERSION;

    List<StorageDirectory> errorSDs = Collections.synchronizedList(new ArrayList<StorageDirectory>());
    assert !editLog.isSegmentOpen() : "Edits log must not be open.";
    LOG.info("Starting upgrade of local storage directories."
        + "\n old LV = " + oldLV + "; old CTime = " + oldCTime
        + ".\n new LV = " + storage.getLayoutVersion() + "; new CTime = " + storage.getCTime());

    // Do upgrade for each directory
    for (Iterator<StorageDirectory> it = storage.dirIterator(false); it.hasNext();) {
        StorageDirectory sd = it.next();
        try {
            NNUpgradeUtil.doPreUpgrade(conf, sd);
        } catch (Exception e) {
            LOG.error("Failed to move aside pre-upgrade storage in image directory " + sd.getRoot(), e);
            errorSDs.add(sd);
            continue;
        }
    }
    if (target.isHaEnabled()) {
        editLog.doPreUpgradeOfSharedLog();
    }
    storage.reportErrorsOnDirectories(errorSDs);
    errorSDs.clear();

    saveFSImageInAllDirs(target, editLog.getLastWrittenTxId());

    // upgrade shared edit storage first
    if (target.isHaEnabled()) {
        editLog.doUpgradeOfSharedLog();
    }
    for (Iterator<StorageDirectory> it = storage.dirIterator(false); it.hasNext();) {
        StorageDirectory sd = it.next();
        try {
            NNUpgradeUtil.doUpgrade(sd, storage);
        } catch (IOException ioe) {
            errorSDs.add(sd);
            continue;
        }
    }
    storage.reportErrorsOnDirectories(errorSDs);
    isUpgradeFinalized = false;

    if (!storage.getRemovedStorageDirs().isEmpty()) {
        // during upgrade, it's a fatal error to fail any storage directory
        throw new IOException("Upgrade failed in " + storage.getRemovedStorageDirs().size()
            + " storage directory(ies), previously logged.");
    }
}
From source file: com.gargoylesoftware.htmlunit.javascript.host.xml.XMLHttpRequest3Test.java
/**
 * Test for a strange error we found: an Ajax request running
 * in parallel shares the additional headers with a form submit.
 *
 * @throws Exception if an error occurs
 */
@Test
public void ajaxInfluencesSubmitHeaders() throws Exception {
    final Map<String, Class<? extends Servlet>> servlets = new HashMap<>();
    servlets.put("/content.html", ContentServlet.class);
    servlets.put("/ajax_headers.html", AjaxHeaderServlet.class);
    servlets.put("/form_headers.html", FormHeaderServlet.class);
    startWebServer("./", null, servlets);

    collectedHeaders_.clear();
    final WebClient client = getWebClient();
    final List<String> collectedAlerts = Collections.synchronizedList(new ArrayList<String>());
    client.setAlertHandler(new CollectingAlertHandler(collectedAlerts));

    assertEquals(0, client.waitForBackgroundJavaScriptStartingBefore(100));
    final HtmlPage page = client.getPage("http://localhost:" + PORT + "/content.html");
    final DomElement elem = page.getElementById("doIt");
    ((HtmlSubmitInput) elem).click();

    Thread.sleep(400); // wait a bit to be sure both requests are out
    assertEquals(0, client.waitForBackgroundJavaScriptStartingBefore(1000));

    String headers = collectedHeaders_.get(0);
    assertTrue(headers, headers.startsWith("Form: "));
    assertFalse(headers, headers.contains("Html-Unit=is great,;"));
    headers = collectedHeaders_.get(1);
    assertTrue(headers, headers.startsWith("Ajax: "));
    assertTrue(headers, headers.contains("Html-Unit=is great,;"));
}
From source file: org.alfresco.repo.node.archive.NodeArchiveServiceImpl.java
/**
 * Finds the archive location for nodes that were deleted from the given store
 * and attempts to restore each node.
 *
 * @see NodeService#getStoreArchiveNode(StoreRef)
 * @see #restoreArchivedNode(NodeRef, NodeRef, QName, QName)
 *
 * @deprecated In 3.4: to be removed
 */
public List<RestoreNodeReport> restoreAllArchivedNodes(final StoreRef originalStoreRef,
        final NodeRef destinationNodeRef, final QName assocTypeQName, final QName assocQName) {
    final String user = AuthenticationUtil.getFullyAuthenticatedUser();
    if (user == null) {
        throw new IllegalStateException("Cannot restore as there is no authenticated user.");
    }
    final List<RestoreNodeReport> results = Collections
            .synchronizedList(new ArrayList<RestoreNodeReport>(1000));

    /**
     * Worker that restores each node
     */
    BatchProcessWorker<NodeRef> worker = new BatchProcessor.BatchProcessWorkerAdaptor<NodeRef>() {
        @Override
        public void beforeProcess() throws Throwable {
            AuthenticationUtil.pushAuthentication();
        }

        public void process(NodeRef nodeRef) throws Throwable {
            AuthenticationUtil.setFullyAuthenticatedUser(user);
            if (nodeService.exists(nodeRef)) {
                RestoreNodeReport report = restoreArchivedNode(nodeRef, destinationNodeRef,
                        assocTypeQName, assocQName);
                // Append the results (it is synchronized)
                results.add(report);
            }
        }

        @Override
        public void afterProcess() throws Throwable {
            AuthenticationUtil.popAuthentication();
        }
    };
    doBulkOperation(user, originalStoreRef, worker);
    return results;
}
From source file: com.smartitengineering.cms.spi.impl.content.RubyGeneratorTest.java
@Test
public void testMultiRubyRepGeneration() throws IOException {
    TypeRepresentationGenerator generator = new RubyRepresentationGenerator();
    final RepresentationTemplate template = mockery.mock(RepresentationTemplate.class);
    WorkspaceAPIImpl impl = new WorkspaceAPIImpl() {
        @Override
        public RepresentationTemplate getRepresentationTemplate(WorkspaceId id, String name) {
            return template;
        }
    };
    impl.setRepresentationGenerators(Collections.singletonMap(TemplateType.RUBY, generator));
    final RepresentationProvider provider = new RepresentationProviderImpl();
    final WorkspaceAPI api = impl;
    registerBeanFactory(api);
    final Content content = mockery.mock(Content.class);
    final Field field = mockery.mock(Field.class);
    final FieldValue value = mockery.mock(FieldValue.class);
    final Map<String, Field> fieldMap = mockery.mock(Map.class);
    final ContentType type = mockery.mock(ContentType.class);
    final Map<String, RepresentationDef> reps = mockery.mock(Map.class, "repMap");
    final RepresentationDef def = mockery.mock(RepresentationDef.class);
    final int threadCount = new Random().nextInt(100);
    logger.info("Number of parallel threads " + threadCount);
    mockery.checking(new Expectations() {
        {
            exactly(threadCount).of(template).getTemplateType();
            will(returnValue(TemplateType.RUBY));
            exactly(threadCount).of(template).getTemplate();
            final byte[] toByteArray = IOUtils.toByteArray(
                    getClass().getClassLoader().getResourceAsStream("scripts/ruby/test-script.rb"));
            will(returnValue(toByteArray));
            exactly(threadCount).of(template).getName();
            will(returnValue(REP_NAME));
            for (int i = 0; i < threadCount; ++i) {
                exactly(1).of(value).getValue();
                will(returnValue(String.valueOf(i)));
            }
            exactly(threadCount).of(field).getValue();
            will(returnValue(value));
            exactly(threadCount).of(fieldMap).get(with(Expectations.<String>anything()));
            will(returnValue(field));
            exactly(threadCount).of(content).getFields();
            will(returnValue(fieldMap));
            exactly(threadCount).of(content).getContentDefinition();
            will(returnValue(type));
            final ContentId contentId = mockery.mock(ContentId.class);
            exactly(2 * threadCount).of(content).getContentId();
            will(returnValue(contentId));
            final WorkspaceId wId = mockery.mock(WorkspaceId.class);
            exactly(threadCount).of(contentId).getWorkspaceId();
            will(returnValue(wId));
            exactly(2 * threadCount).of(type).getRepresentationDefs();
            will(returnValue(reps));
            exactly(2 * threadCount).of(reps).get(with(REP_NAME));
            will(returnValue(def));
            exactly(threadCount).of(def).getParameters();
            will(returnValue(Collections.emptyMap()));
            exactly(threadCount).of(def).getMIMEType();
            will(returnValue(GroovyGeneratorTest.MIME_TYPE));
            final ResourceUri rUri = mockery.mock(ResourceUri.class);
            exactly(threadCount).of(def).getResourceUri();
            will(returnValue(rUri));
            exactly(threadCount).of(rUri).getValue();
            will(returnValue("iUri"));
        }
    });
    final Set<String> set = Collections.synchronizedSet(new LinkedHashSet<String>(threadCount));
    final List<String> list = Collections.synchronizedList(new ArrayList<String>(threadCount));
    final AtomicInteger integer = new AtomicInteger(0);
    Threads group = new Threads();
    for (int i = 0; i < threadCount; ++i) {
        group.addThread(new Thread(new Runnable() {
            public void run() {
                Representation representation = provider.getRepresentation(REP_NAME, type, content);
                Assert.assertNotNull(representation);
                Assert.assertEquals(REP_NAME, representation.getName());
                final String rep = StringUtils.newStringUtf8(representation.getRepresentation());
                list.add(rep);
                set.add(rep);
                Assert.assertEquals(GroovyGeneratorTest.MIME_TYPE, representation.getMimeType());
                integer.addAndGet(1);
            }
        }));
    }
    group.start();
    try {
        group.join();
    } catch (Exception ex) {
        logger.error(ex.getMessage(), ex);
    }
    logger.info("Generated reps list: " + list);
    logger.info("Generated reps set: " + set);
    Assert.assertEquals(threadCount, integer.get());
    Assert.assertEquals(threadCount, list.size());
    Assert.assertEquals(threadCount, set.size());
}
From source file: org.apache.solr.handler.IndexFetcher.java
/**
 * This command downloads all the necessary files from master to install an index commit point. Only changed
 * files are downloaded. It also downloads the conf files (if they are modified).
 *
 * @param forceReplication force a replication in all cases
 * @param forceCoreReload force a core reload in all cases
 * @return true on success, false if slave is already in sync
 * @throws IOException if an exception occurs
 */
boolean fetchLatestIndex(boolean forceReplication, boolean forceCoreReload)
        throws IOException, InterruptedException {

    boolean cleanupDone = false;
    boolean successfulInstall = false;
    markReplicationStart();
    Directory tmpIndexDir = null;
    String tmpIndex;
    Directory indexDir = null;
    String indexDirPath;
    boolean deleteTmpIdxDir = true;
    File tmpTlogDir = null;

    if (!solrCore.getSolrCoreState().getLastReplicateIndexSuccess()) {
        // if the last replication was not a success, we force a full replication
        // when we are a bit more confident we may want to try a partial replication
        // if the error is connection related or something, but we have to be careful
        forceReplication = true;
    }

    try {
        // get the current 'replicateable' index version in the master
        NamedList response;
        try {
            response = getLatestVersion();
        } catch (Exception e) {
            LOG.error("Master at: " + masterUrl + " is not available. Index fetch failed. Exception: "
                    + e.getMessage());
            return false;
        }

        long latestVersion = (Long) response.get(CMD_INDEX_VERSION);
        long latestGeneration = (Long) response.get(GENERATION);
        LOG.info("Master's generation: " + latestGeneration);
        LOG.info("Master's version: " + latestVersion);

        // TODO: make sure that getLatestCommit only returns commit points for the main index (i.e. no side-car indexes)
        IndexCommit commit = solrCore.getDeletionPolicy().getLatestCommit();
        if (commit == null) {
            // Presumably the IndexWriter hasn't been opened yet, and hence the deletion policy hasn't been
            // updated with commit points
            RefCounted<SolrIndexSearcher> searcherRefCounted = null;
            try {
                searcherRefCounted = solrCore.getNewestSearcher(false);
                if (searcherRefCounted == null) {
                    LOG.warn("No open searcher found - fetch aborted");
                    return false;
                }
                commit = searcherRefCounted.get().getIndexReader().getIndexCommit();
            } finally {
                if (searcherRefCounted != null)
                    searcherRefCounted.decref();
            }
        }

        LOG.info("Slave's generation: " + commit.getGeneration());

        if (latestVersion == 0L) {
            if (forceReplication && commit.getGeneration() != 0) {
                // since we won't get the files for an empty index,
                // we just clear ours and commit
                RefCounted<IndexWriter> iw = solrCore.getUpdateHandler().getSolrCoreState()
                        .getIndexWriter(solrCore);
                try {
                    iw.get().deleteAll();
                } finally {
                    iw.decref();
                }
                SolrQueryRequest req = new LocalSolrQueryRequest(solrCore, new ModifiableSolrParams());
                solrCore.getUpdateHandler().commit(new CommitUpdateCommand(req, false));
            }

            // there is nothing to be replicated
            successfulInstall = true;
            return true;
        }

        // TODO: Should we be comparing timestamps (across machines) here?
        if (!forceReplication && IndexDeletionPolicyWrapper.getCommitTimestamp(commit) == latestVersion) {
            // master and slave are already in sync, just return
            LOG.info("Slave in sync with master.");
            successfulInstall = true;
            return true;
        }

        LOG.info("Starting replication process");
        // get the list of files first
        fetchFileList(latestGeneration);
        // this can happen if the commit point is deleted before we fetch the file list.
        if (filesToDownload.isEmpty()) {
            return false;
        }
        LOG.info("Number of files in latest index in master: " + filesToDownload.size());
        if (tlogFilesToDownload != null) {
            LOG.info("Number of tlog files in master: " + tlogFilesToDownload.size());
        }

        // Create the sync service
        fsyncService = ExecutorUtil.newMDCAwareSingleThreadExecutor(new DefaultSolrThreadFactory("fsyncService"));
        // use a synchronized list because the list is read by other threads (to show details)
        filesDownloaded = Collections.synchronizedList(new ArrayList<Map<String, Object>>());
        // if the generation of the master is older than that of the slave, the indexes are not compatible to be
        // copied over; a new index directory is created and all the files are copied
        boolean isFullCopyNeeded = IndexDeletionPolicyWrapper.getCommitTimestamp(commit) >= latestVersion
                || commit.getGeneration() >= latestGeneration || forceReplication;

        String timestamp = new SimpleDateFormat(SnapShooter.DATE_FMT, Locale.ROOT).format(new Date());
        String tmpIdxDirName = "index." + timestamp;
        tmpIndex = solrCore.getDataDir() + tmpIdxDirName;

        tmpIndexDir = solrCore.getDirectoryFactory().get(tmpIndex, DirContext.DEFAULT,
                solrCore.getSolrConfig().indexConfig.lockType);

        // tmp dir for tlog files
        if (tlogFilesToDownload != null) {
            tmpTlogDir = new File(solrCore.getUpdateHandler().getUpdateLog().getLogDir(), "tlog." + timestamp);
        }

        // cindex dir...
        indexDirPath = solrCore.getIndexDir();
        indexDir = solrCore.getDirectoryFactory().get(indexDirPath, DirContext.DEFAULT,
                solrCore.getSolrConfig().indexConfig.lockType);

        try {
            // We will compare all the index files from the master vs the index files on disk to see if there is
            // a mismatch in the metadata. If there is a mismatch for the same index file then we download the
            // entire index again.
            if (!isFullCopyNeeded && isIndexStale(indexDir)) {
                isFullCopyNeeded = true;
            }

            if (!isFullCopyNeeded) {
                // a searcher might be using some flushed but not committed segments
                // because of soft commits (which open a searcher on IW's data)
                // so we need to close the existing searcher on the last commit
                // and wait until we are able to clean up all unused lucene files
                if (solrCore.getCoreDescriptor().getCoreContainer().isZooKeeperAware()) {
                    solrCore.closeSearcher();
                }

                // rollback and reopen index writer and wait until all unused files
                // are successfully deleted
                solrCore.getUpdateHandler().newIndexWriter(true);
                RefCounted<IndexWriter> writer = solrCore.getUpdateHandler().getSolrCoreState()
                        .getIndexWriter(null);
                try {
                    IndexWriter indexWriter = writer.get();
                    int c = 0;
                    indexWriter.deleteUnusedFiles();
                    while (hasUnusedFiles(indexDir, commit)) {
                        indexWriter.deleteUnusedFiles();
                        LOG.info("Sleeping for 1000ms to wait for unused lucene index files to be delete-able");
                        Thread.sleep(1000);
                        c++;
                        if (c >= 30) {
                            LOG.warn("IndexFetcher unable to cleanup unused lucene index files so we must do a full copy instead");
                            isFullCopyNeeded = true;
                            break;
                        }
                    }
                    if (c > 0) {
                        LOG.info("IndexFetcher slept for " + (c * 1000)
                                + "ms for unused lucene index files to be delete-able");
                    }
                } finally {
                    writer.decref();
                }
            }

            boolean reloadCore = false;

            try {
                // we have to be careful and do this after we know isFullCopyNeeded won't be flipped
                if (!isFullCopyNeeded) {
                    solrCore.getUpdateHandler().getSolrCoreState().closeIndexWriter(solrCore, true);
                }

                LOG.info("Starting download (fullCopy={}) to {}", isFullCopyNeeded, tmpIndexDir);
                successfulInstall = false;

                long bytesDownloaded = downloadIndexFiles(isFullCopyNeeded, indexDir, tmpIndexDir,
                        latestGeneration);
                if (tlogFilesToDownload != null) {
                    bytesDownloaded += downloadTlogFiles(tmpTlogDir, latestGeneration);
                    reloadCore = true; // reload update log
                }
                final long timeTakenSeconds = getReplicationTimeElapsed();
                final Long bytesDownloadedPerSecond = (timeTakenSeconds != 0
                        ? new Long(bytesDownloaded / timeTakenSeconds) : null);
                LOG.info("Total time taken for download (fullCopy={},bytesDownloaded={}) : {} secs ({} bytes/sec) to {}",
                        isFullCopyNeeded, bytesDownloaded, timeTakenSeconds, bytesDownloadedPerSecond, tmpIndexDir);

                Collection<Map<String, Object>> modifiedConfFiles = getModifiedConfFiles(confFilesToDownload);
                if (!modifiedConfFiles.isEmpty()) {
                    reloadCore = true;
                    downloadConfFiles(confFilesToDownload, latestGeneration);
                    if (isFullCopyNeeded) {
                        successfulInstall = solrCore.modifyIndexProps(tmpIdxDirName);
                        deleteTmpIdxDir = false;
                    } else {
                        successfulInstall = moveIndexFiles(tmpIndexDir, indexDir);
                    }
                    if (tlogFilesToDownload != null) {
                        // move tlog files and refresh ulog only if we successfully installed a new index
                        successfulInstall &= moveTlogFiles(tmpTlogDir);
                    }
                    if (successfulInstall) {
                        if (isFullCopyNeeded) {
                            // let the system know we are changing dir's and the old one
                            // may be closed
                            if (indexDir != null) {
                                solrCore.getDirectoryFactory().doneWithDirectory(indexDir);
                                // Cleanup all index files not associated with any *named* snapshot.
                                solrCore.deleteNonSnapshotIndexFiles(indexDirPath);
                            }
                        }
                        LOG.info("Configuration files are modified, core will be reloaded");
                        // write the time of replication and the conf files to a file
                        logReplicationTimeAndConfFiles(modifiedConfFiles, successfulInstall);
                    }
                } else {
                    terminateAndWaitFsyncService();
                    if (isFullCopyNeeded) {
                        successfulInstall = solrCore.modifyIndexProps(tmpIdxDirName);
                        deleteTmpIdxDir = false;
                    } else {
                        successfulInstall = moveIndexFiles(tmpIndexDir, indexDir);
                    }
                    if (tlogFilesToDownload != null) {
                        // move tlog files and refresh ulog only if we successfully installed a new index
                        successfulInstall &= moveTlogFiles(tmpTlogDir);
                    }
                    if (successfulInstall) {
                        logReplicationTimeAndConfFiles(modifiedConfFiles, successfulInstall);
                    }
                }
            } finally {
                if (!isFullCopyNeeded) {
                    solrCore.getUpdateHandler().getSolrCoreState().openIndexWriter(solrCore);
                }
            }

            // we must reload the core after we open the IW back up
            if (successfulInstall && (reloadCore || forceCoreReload)) {
                LOG.info("Reloading SolrCore {}", solrCore.getName());
                reloadCore();
            }

            if (successfulInstall) {
                if (isFullCopyNeeded) {
                    // let the system know we are changing dir's and the old one
                    // may be closed
                    if (indexDir != null) {
                        LOG.info("removing old index directory " + indexDir);
                        solrCore.getDirectoryFactory().doneWithDirectory(indexDir);
                        solrCore.getDirectoryFactory().remove(indexDir);
                    }
                }
                if (isFullCopyNeeded) {
                    solrCore.getUpdateHandler().newIndexWriter(isFullCopyNeeded);
                }
                openNewSearcherAndUpdateCommitPoint();
            }

            if (!isFullCopyNeeded && !forceReplication && !successfulInstall) {
                cleanup(solrCore, tmpIndexDir, indexDir, deleteTmpIdxDir, tmpTlogDir, successfulInstall);
                cleanupDone = true;
                // we try with a full copy of the index
                LOG.warn("Replication attempt was not successful - trying a full index replication reloadCore={}",
                        reloadCore);
                successfulInstall = fetchLatestIndex(true, reloadCore);
            }

            markReplicationStop();
            return successfulInstall;
        } catch (ReplicationHandlerException e) {
            LOG.error("User aborted Replication");
            return false;
        } catch (SolrException e) {
            throw e;
        } catch (InterruptedException e) {
            throw new InterruptedException("Index fetch interrupted");
        } catch (Exception e) {
            throw new SolrException(ErrorCode.SERVER_ERROR, "Index fetch failed : ", e);
        }
    } finally {
        if (!cleanupDone) {
            cleanup(solrCore, tmpIndexDir, indexDir, deleteTmpIdxDir, tmpTlogDir, successfulInstall);
        }
    }
}
From source file: com.gargoylesoftware.htmlunit.WebClientWaitForBackgroundJobsTest.java
/**
 * {@link WebClient#waitForBackgroundJavaScriptStartingBefore(long)} should have an overview of all windows.
 * @throws Exception if the test fails
 */
@Test
public void jobSchedulesJobInOtherWindow2() throws Exception {
    final String html = "<html>\n"
        + "<head>\n"
        + "  <script>\n"
        + "    var counter = 0;\n"
        + "    function test() {\n"
        + "      var w = window.open('about:blank');\n"
        + "      w.setTimeout(doWork1, 200);\n"
        + "    }\n"
        + "    function doWork1() {\n"
        + "      alert('work1');\n"
        + "      setTimeout(doWork2, 400);\n"
        + "    }\n"
        + "    function doWork2() {\n"
        + "      alert('work2');\n"
        + "    }\n"
        + "  </script>\n"
        + "</head>\n"
        + "<body onload='test()'>\n"
        + "</body>\n"
        + "</html>";

    final List<String> collectedAlerts = Collections.synchronizedList(new ArrayList<String>());
    final HtmlPage page = loadPage(html, collectedAlerts);

    startTimedTest();
    assertEquals(0, page.getWebClient().waitForBackgroundJavaScriptStartingBefore(1000));
    assertMaxTestRunTime(1000);

    final String[] expectedAlerts = {"work1", "work2"};
    assertEquals(expectedAlerts, collectedAlerts);
}
From source file: com.smartitengineering.cms.spi.impl.content.JavascriptGeneratorTest.java
@Test
public void testMultiJavascriptRepGeneration() throws IOException {
    TypeRepresentationGenerator generator = new JavascriptRepresentationGenerator();
    final RepresentationTemplate template = mockery.mock(RepresentationTemplate.class);
    WorkspaceAPIImpl impl = new WorkspaceAPIImpl() {
        @Override
        public RepresentationTemplate getRepresentationTemplate(WorkspaceId id, String name) {
            return template;
        }
    };
    impl.setRepresentationGenerators(Collections.singletonMap(TemplateType.JAVASCRIPT, generator));
    final RepresentationProvider provider = new RepresentationProviderImpl();
    registerBeanFactory(impl);
    final Content content = mockery.mock(Content.class);
    final Field field = mockery.mock(Field.class);
    final FieldValue value = mockery.mock(FieldValue.class);
    final Map<String, Field> fieldMap = mockery.mock(Map.class);
    final ContentType type = mockery.mock(ContentType.class);
    final Map<String, RepresentationDef> reps = mockery.mock(Map.class, "repMap");
    final RepresentationDef def = mockery.mock(RepresentationDef.class);
    final int threadCount = new Random().nextInt(100);
    logger.info("Number of parallel threads " + threadCount);
    mockery.checking(new Expectations() {
        {
            exactly(threadCount).of(template).getTemplateType();
            will(returnValue(TemplateType.JAVASCRIPT));
            exactly(threadCount).of(template).getTemplate();
            final byte[] toByteArray = IOUtils.toByteArray(
                    getClass().getClassLoader().getResourceAsStream("scripts/js/test-script.js"));
            will(returnValue(toByteArray));
            exactly(threadCount).of(template).getName();
            will(returnValue(REP_NAME));
            for (int i = 0; i < threadCount; ++i) {
                exactly(1).of(value).getValue();
                will(returnValue(String.valueOf(i)));
            }
            exactly(threadCount).of(field).getValue();
            will(returnValue(value));
            exactly(threadCount).of(fieldMap).get(with(Expectations.<String>anything()));
            will(returnValue(field));
            exactly(threadCount).of(content).getFields();
            will(returnValue(fieldMap));
            exactly(threadCount).of(content).getContentDefinition();
            will(returnValue(type));
            final ContentId contentId = mockery.mock(ContentId.class);
            exactly(2 * threadCount).of(content).getContentId();
            will(returnValue(contentId));
            final WorkspaceId wId = mockery.mock(WorkspaceId.class);
            exactly(threadCount).of(contentId).getWorkspaceId();
            will(returnValue(wId));
            exactly(2 * threadCount).of(type).getRepresentationDefs();
            will(returnValue(reps));
            exactly(2 * threadCount).of(reps).get(with(REP_NAME));
            will(returnValue(def));
            exactly(threadCount).of(def).getParameters();
            will(returnValue(Collections.emptyMap()));
            exactly(threadCount).of(def).getMIMEType();
            will(returnValue(GroovyGeneratorTest.MIME_TYPE));
            final ResourceUri rUri = mockery.mock(ResourceUri.class);
            exactly(threadCount).of(def).getResourceUri();
            will(returnValue(rUri));
            exactly(threadCount).of(rUri).getValue();
            will(returnValue("iUri"));
        }
    });
    Assert.assertNotNull(SmartContentAPI.getInstance());
    Assert.assertNotNull(SmartContentAPI.getInstance().getContentLoader());
    final Set<String> set = Collections.synchronizedSet(new LinkedHashSet<String>(threadCount));
    final List<String> list = Collections.synchronizedList(new ArrayList<String>(threadCount));
    final AtomicInteger integer = new AtomicInteger(0);
    Threads group = new Threads();
    for (int i = 0; i < threadCount; ++i) {
        group.addThread(new Thread(new Runnable() {
            public void run() {
                Representation representation = provider.getRepresentation(REP_NAME, type, content);
                Assert.assertNotNull(representation);
                Assert.assertEquals(REP_NAME, representation.getName());
                final String rep = StringUtils.newStringUtf8(representation.getRepresentation());
                list.add(rep);
                set.add(rep);
                Assert.assertEquals(GroovyGeneratorTest.MIME_TYPE, representation.getMimeType());
                integer.addAndGet(1);
            }
        }));
    }
    group.start();
    try {
        group.join();
    } catch (Exception ex) {
        logger.error(ex.getMessage(), ex);
    }
    logger.info("Generated reps list: " + list);
    logger.info("Generated reps set: " + set);
    Assert.assertEquals(threadCount, integer.get());
    Assert.assertEquals(threadCount, list.size());
    Assert.assertEquals(threadCount, set.size());
}