List of usage examples for java.util.concurrent.ConcurrentMap.size()
int size();
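Before the project examples below, a minimal sketch of the call itself (the class and keys here are illustrative, not taken from any of the examples). One point worth remembering: for ConcurrentHashMap, size() is only a moment-in-time estimate while other threads are modifying the map concurrently.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class ConcurrentMapSizeDemo {
    public static void main(String[] args) {
        final ConcurrentMap<String, Integer> counts = new ConcurrentHashMap<>();
        counts.put("alpha", 1);
        counts.putIfAbsent("beta", 2);
        counts.putIfAbsent("alpha", 99); // no effect: key already present

        // size() reports the current number of mappings; with concurrent
        // writers in flight the returned value is only an estimate.
        System.out.println(counts.size()); // prints 2
    }
}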
From source file:com.github.podd.utils.test.OntologyUtilsTest.java
@Test
public void testImportsOrderFourLevelsOutOfOrder() throws Exception {
    final Model model = new LinkedHashModel();
    OntologyUtils.ontologyIDsToModel(Arrays.asList(OntologyConstant.testOntologyID), model);
    model.add(OntologyConstant.testVersionUri1, OWL.IMPORTS, OntologyConstant.testImportOntologyUri1);
    model.add(OntologyConstant.testImportOntologyUri1, RDF.TYPE, OWL.ONTOLOGY);
    model.add(OntologyConstant.testImportOntologyUri1, OWL.VERSIONIRI, OntologyConstant.testImportVersionUri1);
    model.add(OntologyConstant.testImportVersionUri1, RDF.TYPE, OWL.ONTOLOGY);
    model.add(OntologyConstant.testImportOntologyUri2, RDF.TYPE, OWL.ONTOLOGY);
    model.add(OntologyConstant.testImportOntologyUri2, OWL.VERSIONIRI, OntologyConstant.testImportVersionUri2);
    model.add(OntologyConstant.testImportVersionUri2, RDF.TYPE, OWL.ONTOLOGY);
    model.add(OntologyConstant.testImportVersionUri1, OWL.IMPORTS, OntologyConstant.testImportVersionUri2);
    model.add(OntologyConstant.testImportOntologyUri3, RDF.TYPE, OWL.ONTOLOGY);
    model.add(OntologyConstant.testImportOntologyUri3, OWL.VERSIONIRI, OntologyConstant.testImportVersionUri3);
    model.add(OntologyConstant.testImportVersionUri3, RDF.TYPE, OWL.ONTOLOGY);
    model.add(OntologyConstant.testImportVersionUri2, OWL.IMPORTS, OntologyConstant.testImportVersionUri3);
    model.add(OntologyConstant.testImportOntologyUri4, RDF.TYPE, OWL.ONTOLOGY);
    model.add(OntologyConstant.testImportOntologyUri4, OWL.VERSIONIRI, OntologyConstant.testImportVersionUri4);
    model.add(OntologyConstant.testImportVersionUri4, RDF.TYPE, OWL.ONTOLOGY);
    model.add(OntologyConstant.testImportVersionUri3, OWL.IMPORTS, OntologyConstant.testImportVersionUri4);

    final Set<URI> schemaOntologyUris = new LinkedHashSet<URI>();
    final Set<URI> schemaVersionUris = new LinkedHashSet<URI>();
    schemaOntologyUris.add(OntologyConstant.testImportOntologyUri2);
    schemaOntologyUris.add(OntologyConstant.testImportOntologyUri4);
    schemaOntologyUris.add(OntologyConstant.testOntologyUri1);
    schemaOntologyUris.add(OntologyConstant.testImportOntologyUri3);
    schemaOntologyUris.add(OntologyConstant.testImportOntologyUri1);
    schemaVersionUris.add(OntologyConstant.testImportVersionUri2);
    schemaVersionUris.add(OntologyConstant.testImportVersionUri4);
    schemaVersionUris.add(OntologyConstant.testVersionUri1);
    schemaVersionUris.add(OntologyConstant.testImportVersionUri3);
    schemaVersionUris.add(OntologyConstant.testImportVersionUri1);

    final ConcurrentMap<URI, Set<URI>> importsMap = new ConcurrentHashMap<URI, Set<URI>>();
    final List<URI> orderImports = OntologyUtils.orderImports(model, schemaOntologyUris, schemaVersionUris,
            importsMap, false);

    Assert.assertEquals(5, orderImports.size());
    Assert.assertEquals(OntologyConstant.testImportVersionUri4, orderImports.get(0));
    Assert.assertEquals(OntologyConstant.testImportVersionUri3, orderImports.get(1));
    Assert.assertEquals(OntologyConstant.testImportVersionUri2, orderImports.get(2));
    Assert.assertEquals(OntologyConstant.testImportVersionUri1, orderImports.get(3));
    Assert.assertEquals(OntologyConstant.testVersionUri1, orderImports.get(4));

    Assert.assertEquals(5, importsMap.size());
    Assert.assertTrue(importsMap.containsKey(OntologyConstant.testImportVersionUri4));
    Assert.assertTrue(importsMap.containsKey(OntologyConstant.testImportVersionUri3));
    Assert.assertTrue(importsMap.containsKey(OntologyConstant.testImportVersionUri2));
    Assert.assertTrue(importsMap.containsKey(OntologyConstant.testImportVersionUri1));
    Assert.assertTrue(importsMap.containsKey(OntologyConstant.testVersionUri1));

    final Set<URI> imports4 = importsMap.get(OntologyConstant.testImportVersionUri4);
    Assert.assertNotNull(imports4);
    Assert.assertEquals(0, imports4.size());
    final Set<URI> imports3 = importsMap.get(OntologyConstant.testImportVersionUri3);
    Assert.assertNotNull(imports3);
    Assert.assertEquals(1, imports3.size());
    Assert.assertTrue(imports3.contains(OntologyConstant.testImportVersionUri4));
    final Set<URI> imports2 = importsMap.get(OntologyConstant.testImportVersionUri2);
    Assert.assertNotNull(imports2);
    Assert.assertEquals(2, imports2.size());
    Assert.assertTrue(imports2.contains(OntologyConstant.testImportVersionUri3));
    Assert.assertTrue(imports2.contains(OntologyConstant.testImportVersionUri4));
    final Set<URI> imports1 = importsMap.get(OntologyConstant.testImportVersionUri1);
    Assert.assertNotNull(imports1);
    Assert.assertEquals(3, imports1.size());
    Assert.assertTrue(imports1.contains(OntologyConstant.testImportVersionUri2));
    Assert.assertTrue(imports1.contains(OntologyConstant.testImportVersionUri3));
    Assert.assertTrue(imports1.contains(OntologyConstant.testImportVersionUri4));
    final Set<URI> importsRoot = importsMap.get(OntologyConstant.testVersionUri1);
    Assert.assertNotNull(importsRoot);
    Assert.assertEquals(4, importsRoot.size());
    Assert.assertTrue(importsRoot.contains(OntologyConstant.testImportVersionUri1));
    Assert.assertTrue(importsRoot.contains(OntologyConstant.testImportVersionUri2));
    Assert.assertTrue(importsRoot.contains(OntologyConstant.testImportVersionUri3));
    Assert.assertTrue(importsRoot.contains(OntologyConstant.testImportVersionUri4));
}
From source file:com.github.podd.utils.test.OntologyUtilsTest.java
@Test
public void testImportsOrderFourLevels() throws Exception {
    final Model model = new LinkedHashModel();
    OntologyUtils.ontologyIDsToModel(Arrays.asList(OntologyConstant.testOntologyID), model);
    model.add(OntologyConstant.testVersionUri1, OWL.IMPORTS, OntologyConstant.testImportOntologyUri1);
    model.add(OntologyConstant.testImportOntologyUri1, RDF.TYPE, OWL.ONTOLOGY);
    model.add(OntologyConstant.testImportOntologyUri1, OWL.VERSIONIRI, OntologyConstant.testImportVersionUri1);
    model.add(OntologyConstant.testImportVersionUri1, RDF.TYPE, OWL.ONTOLOGY);
    model.add(OntologyConstant.testImportOntologyUri2, RDF.TYPE, OWL.ONTOLOGY);
    model.add(OntologyConstant.testImportOntologyUri2, OWL.VERSIONIRI, OntologyConstant.testImportVersionUri2);
    model.add(OntologyConstant.testImportVersionUri2, RDF.TYPE, OWL.ONTOLOGY);
    model.add(OntologyConstant.testImportVersionUri1, OWL.IMPORTS, OntologyConstant.testImportVersionUri2);
    model.add(OntologyConstant.testImportOntologyUri3, RDF.TYPE, OWL.ONTOLOGY);
    model.add(OntologyConstant.testImportOntologyUri3, OWL.VERSIONIRI, OntologyConstant.testImportVersionUri3);
    model.add(OntologyConstant.testImportVersionUri3, RDF.TYPE, OWL.ONTOLOGY);
    model.add(OntologyConstant.testImportVersionUri2, OWL.IMPORTS, OntologyConstant.testImportVersionUri3);
    model.add(OntologyConstant.testImportOntologyUri4, RDF.TYPE, OWL.ONTOLOGY);
    model.add(OntologyConstant.testImportOntologyUri4, OWL.VERSIONIRI, OntologyConstant.testImportVersionUri4);
    model.add(OntologyConstant.testImportVersionUri4, RDF.TYPE, OWL.ONTOLOGY);
    model.add(OntologyConstant.testImportVersionUri3, OWL.IMPORTS, OntologyConstant.testImportVersionUri4);

    final Set<URI> schemaOntologyUris = new LinkedHashSet<URI>();
    final Set<URI> schemaVersionUris = new LinkedHashSet<URI>();
    schemaOntologyUris.add(OntologyConstant.testOntologyUri1);
    schemaOntologyUris.add(OntologyConstant.testImportOntologyUri1);
    schemaOntologyUris.add(OntologyConstant.testImportOntologyUri2);
    schemaOntologyUris.add(OntologyConstant.testImportOntologyUri3);
    schemaOntologyUris.add(OntologyConstant.testImportOntologyUri4);
    schemaVersionUris.add(OntologyConstant.testVersionUri1);
    schemaVersionUris.add(OntologyConstant.testImportVersionUri1);
    schemaVersionUris.add(OntologyConstant.testImportVersionUri2);
    schemaVersionUris.add(OntologyConstant.testImportVersionUri3);
    schemaVersionUris.add(OntologyConstant.testImportVersionUri4);

    final ConcurrentMap<URI, Set<URI>> importsMap = new ConcurrentHashMap<URI, Set<URI>>();
    // Expected output solution from importsMap after calling orderImports:
    // importsMap.put(testVersionUri1, Collections.singleton(OntologyConstant.testImportVersionUri1));
    // importsMap.put(testImportVersionUri1, Collections.singleton(OntologyConstant.testImportVersionUri2));
    // importsMap.put(testImportVersionUri2, Collections.singleton(OntologyConstant.testImportVersionUri3));
    // importsMap.put(testImportVersionUri3, new HashSet<URI>(Arrays.asList(OntologyConstant.testImportVersionUri4)));
    // importsMap.put(testImportVersionUri4, new HashSet<URI>());
    final List<URI> orderImports = OntologyUtils.orderImports(model, schemaOntologyUris, schemaVersionUris,
            importsMap, false);

    Assert.assertEquals(5, orderImports.size());
    Assert.assertEquals(OntologyConstant.testImportVersionUri4, orderImports.get(0));
    Assert.assertEquals(OntologyConstant.testImportVersionUri3, orderImports.get(1));
    Assert.assertEquals(OntologyConstant.testImportVersionUri2, orderImports.get(2));
    Assert.assertEquals(OntologyConstant.testImportVersionUri1, orderImports.get(3));
    Assert.assertEquals(OntologyConstant.testVersionUri1, orderImports.get(4));

    Assert.assertEquals(5, importsMap.size());
    Assert.assertTrue(importsMap.containsKey(OntologyConstant.testImportVersionUri4));
    Assert.assertTrue(importsMap.containsKey(OntologyConstant.testImportVersionUri3));
    Assert.assertTrue(importsMap.containsKey(OntologyConstant.testImportVersionUri2));
    Assert.assertTrue(importsMap.containsKey(OntologyConstant.testImportVersionUri1));
    Assert.assertTrue(importsMap.containsKey(OntologyConstant.testVersionUri1));

    final Set<URI> imports4 = importsMap.get(OntologyConstant.testImportVersionUri4);
    Assert.assertNotNull(imports4);
    Assert.assertEquals(0, imports4.size());
    final Set<URI> imports3 = importsMap.get(OntologyConstant.testImportVersionUri3);
    Assert.assertNotNull(imports3);
    Assert.assertEquals(1, imports3.size());
    Assert.assertTrue(imports3.contains(OntologyConstant.testImportVersionUri4));
    final Set<URI> imports2 = importsMap.get(OntologyConstant.testImportVersionUri2);
    Assert.assertNotNull(imports2);
    Assert.assertEquals(2, imports2.size());
    Assert.assertTrue(imports2.contains(OntologyConstant.testImportVersionUri3));
    Assert.assertTrue(imports2.contains(OntologyConstant.testImportVersionUri4));
    final Set<URI> imports1 = importsMap.get(OntologyConstant.testImportVersionUri1);
    Assert.assertNotNull(imports1);
    Assert.assertEquals(3, imports1.size());
    Assert.assertTrue(imports1.contains(OntologyConstant.testImportVersionUri2));
    Assert.assertTrue(imports1.contains(OntologyConstant.testImportVersionUri3));
    Assert.assertTrue(imports1.contains(OntologyConstant.testImportVersionUri4));
    final Set<URI> importsRoot = importsMap.get(OntologyConstant.testVersionUri1);
    Assert.assertNotNull(importsRoot);
    Assert.assertEquals(4, importsRoot.size());
    Assert.assertTrue(importsRoot.contains(OntologyConstant.testImportVersionUri1));
    Assert.assertTrue(importsRoot.contains(OntologyConstant.testImportVersionUri2));
    Assert.assertTrue(importsRoot.contains(OntologyConstant.testImportVersionUri3));
    Assert.assertTrue(importsRoot.contains(OntologyConstant.testImportVersionUri4));
}
From source file:org.apache.flink.yarn.YarnTestBase.java
public static int getRunningContainers() {
    int count = 0;
    for (int nmId = 0; nmId < NUM_NODEMANAGERS; nmId++) {
        NodeManager nm = yarnCluster.getNodeManager(nmId);
        ConcurrentMap<ContainerId, Container> containers = nm.getNMContext().getContainers();
        count += containers.size();
    }
    return count;
}
From source file:org.apache.hadoop.yarn.server.resourcemanager.TestRMRestart.java
private void verifyNodesAfterDecom(MockRM rm, int numNodes, Resource expectedCapability, String expectedVersion) {
    ConcurrentMap<NodeId, RMNode> inactiveRMNodes = rm.getRMContext().getInactiveRMNodes();
    Assert.assertEquals(numNodes, inactiveRMNodes.size());
    for (RMNode rmNode : inactiveRMNodes.values()) {
        Assert.assertEquals(expectedCapability, rmNode.getTotalCapability());
        Assert.assertEquals(expectedVersion, rmNode.getNodeManagerVersion());
    }
}
From source file:org.apache.nifi.web.security.otp.OtpService.java
/**
 * Generates a token and stores it in the specified cache.
 *
 * @param cache The cache
 * @param authenticationToken The authentication
 * @return The one time use token
 */
private String generateToken(final ConcurrentMap<CacheKey, String> cache,
        final OtpAuthenticationToken authenticationToken) {
    if (cache.size() >= MAX_CACHE_SOFT_LIMIT) {
        throw new IllegalStateException("The maximum number of single use tokens have been issued.");
    }

    // hash the authentication and build a cache key
    final CacheKey cacheKey = new CacheKey(hash(authenticationToken));

    // store the token unless it is already stored, which should not update its original timestamp
    cache.putIfAbsent(cacheKey, authenticationToken.getName());

    // return the token
    return cacheKey.getKey();
}
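The NiFi example above uses size() as a soft capacity check before putIfAbsent(). A stripped-down sketch of that pattern follows (the class, field, and limit names are illustrative, not NiFi's). Because the size check and the insert are two separate operations, concurrent callers can briefly push the map past the threshold, which is why it is only a soft limit.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

// Hypothetical token store illustrating the size()-based soft limit.
public class TokenStore {
    private static final int MAX_CACHE_SOFT_LIMIT = 100;
    private final ConcurrentMap<String, String> cache = new ConcurrentHashMap<>();

    public String store(String key, String value) {
        // Soft limit: size() and putIfAbsent() are not one atomic step,
        // so concurrent callers may briefly exceed the threshold.
        if (cache.size() >= MAX_CACHE_SOFT_LIMIT) {
            throw new IllegalStateException("Token cache is full.");
        }
        cache.putIfAbsent(key, value);
        return key;
    }
}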
From source file:org.onosproject.yms.app.ysr.DefaultYangSchemaRegistry.java
/**
 * Removes schema node from schema map.
 *
 * @param removableNode schema node which needs to be removed
 */
private void removeSchemaNode(YangSchemaNode removableNode) {
    String name = removableNode.getName();
    String revName = name;
    String date = getDateInStringFormat(removableNode);
    if (date != null) {
        revName = name + AT + date;
    }
    ConcurrentMap<String, YangSchemaNode> revMap = yangSchemaStore.get(name);
    if (revMap != null && !revMap.isEmpty() && revMap.size() != 1) {
        revMap.remove(revName);
    } else {
        yangSchemaStore.remove(removableNode.getName());
    }
}
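The ONOS example keeps a map of revision maps and uses size() to decide whether to drop one revision or the whole entry. A minimal sketch of that two-level removal pattern with generic names (the store and types here are illustrative, not the ONOS API):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

// Illustrative two-level registry: name -> (revision -> value).
public class RevisionStore<V> {
    private final ConcurrentMap<String, ConcurrentMap<String, V>> store = new ConcurrentHashMap<>();

    public void remove(String name, String revision) {
        ConcurrentMap<String, V> revisions = store.get(name);
        if (revisions != null && !revisions.isEmpty() && revisions.size() != 1) {
            // More than one revision left: drop only the requested one.
            revisions.remove(revision);
        } else {
            // Last (or no) revision: drop the whole entry for this name.
            store.remove(name);
        }
    }
}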
From source file:org.silverpeas.core.viewer.service.ViewServiceConcurrencyDemonstrationIT.java
@Test
public void demonstrateConcurrencyManagement() throws Exception {
    if (canPerformViewConversionTest()) {
        final List<Throwable> throwables = new ArrayList<>();
        final ConcurrentMap serviceCache = (ConcurrentMap) FieldUtils.readField(viewService, "cache", true);
        assertThat(serviceCache.size(), is(0));
        final long startTime = System.currentTimeMillis();
        final int NB_VIEW_CALLS = 100;
        final int LAST_REQUEST_INDEX = NB_VIEW_CALLS - 1;
        final long durationToIncreaseChancesToPerformThreadBefore = 50;
        final long[] durationTimes = new long[NB_VIEW_CALLS];
        final long[] endTimes = new long[NB_VIEW_CALLS];
        final DocumentView[] results = new DocumentView[NB_VIEW_CALLS];
        for (int i = 0; i < LAST_REQUEST_INDEX; i++) {
            final int index = i;
            SubThreadManager.addAndStart(new Thread(() -> {
                try {
                    final long startThreadTime = System.currentTimeMillis();
                    final DocumentView viewFirstRequest = viewService
                            .getDocumentView(ViewerContext.from(getSimpleDocumentNamed("file.ppt")));
                    final long endThreadTime = System.currentTimeMillis();
                    durationTimes[index] = endThreadTime - startThreadTime;
                    endTimes[index] = endThreadTime;
                    results[index] = viewFirstRequest;
                } catch (Exception e) {
                    throwables.add(e);
                    SubThreadManager.killAll();
                }
            }));
        }
        SubThreadManager.addAndStart(new Thread(() -> {
            try {
                Thread.sleep(500);
                // Technical verification
                assertThat("At this level, the cache has 2 elements", serviceCache.size(), is(2));
                assertThat(getTemporaryPath().listFiles(), arrayWithSize(2));
            } catch (Throwable e) {
                throwables.add(e);
                SubThreadManager.killAll();
            }
        }));
        Thread.sleep(durationToIncreaseChancesToPerformThreadBefore);
        final long startLastRequestTime = System.currentTimeMillis();
        final DocumentView viewLastRequest = viewService
                .getDocumentView(ViewerContext.from(getSimpleDocumentNamed("file.ppt")));
        final long endLastRequestTime = System.currentTimeMillis();
        durationTimes[LAST_REQUEST_INDEX] = endLastRequestTime - startLastRequestTime;
        endTimes[LAST_REQUEST_INDEX] = endLastRequestTime;
        results[LAST_REQUEST_INDEX] = viewLastRequest;
        // Waiting for all thread ends
        SubThreadManager.joinAll(60000);
        for (Throwable throwable : throwables) {
            throw new RuntimeException(throwable);
        }
        for (long endTime : endTimes) {
            assertThat(endTime, greaterThan(startTime));
        }
        long minEndTime = Long.MAX_VALUE;
        long maxEndTime = 0;
        for (long endTime : endTimes) {
            assertThat(endTime, greaterThan(startTime));
            minEndTime = Math.min(minEndTime, endTime);
            maxEndTime = Math.max(maxEndTime, endTime);
        }
        assertThat((maxEndTime - minEndTime), lessThan(250L));
        for (DocumentView documentView : results) {
            assertThat(documentView, notNullValue());
        }
        for (int i = 0; i < LAST_REQUEST_INDEX; i++) {
            assertThat(results[i].getPhysicalFile(), is(viewLastRequest.getPhysicalFile()));
            assertThat(results[i].getNbPages(), is(viewLastRequest.getNbPages()));
            assertThat(results[i].getWidth(), is(viewLastRequest.getWidth()));
            assertThat(results[i].getHeight(), is(viewLastRequest.getHeight()));
            assertThat(results[i].getDisplayLicenseKey(), is(viewLastRequest.getDisplayLicenseKey()));
            assertThat(results[i].getOriginalFileName(), is(viewLastRequest.getOriginalFileName()));
            assertThat(results[i].getURLAsString(), is(viewLastRequest.getURLAsString()));
            assertThat(results[i].isDocumentSplit(), is(viewLastRequest.isDocumentSplit()));
            assertThat(results[i].areSearchDataComputed(), is(viewLastRequest.areSearchDataComputed()));
        }
        assertThat(serviceCache.size(), is(0));
        assertThat(getTemporaryPath().listFiles(), arrayWithSize(2));
    }
}
From source file:org.silverpeas.core.viewer.service.ViewServiceConcurrencyDemonstrationIT.java
@Test
public void demonstrateConcurrencyManagementWithTwoDifferentConversionsAtSameTime() throws Exception {
    if (canPerformViewConversionTest()) {
        final List<Throwable> throwables = new ArrayList<>();
        final ConcurrentMap serviceCache = (ConcurrentMap) FieldUtils.readField(viewService, "cache", true);
        assertThat(serviceCache.size(), is(0));
        final int NB_VIEW_CALLS = 100;
        for (int i = 0; i < NB_VIEW_CALLS; i++) {
            SubThreadManager.addAndStart(new Thread(() -> {
                try {
                    viewService.getDocumentView(ViewerContext.from(getSimpleDocumentNamed("file.odp")));
                } catch (Exception ignore) {
                }
            }));
            SubThreadManager.addAndStart(new Thread(() -> {
                try {
                    viewService.getDocumentView(ViewerContext.from(getSimpleDocumentNamed("file.pdf")));
                } catch (Exception ignore) {
                }
            }));
        }
        SubThreadManager.addAndStart(new Thread(() -> {
            try {
                Thread.sleep(500);
                // Technical verification
                assertThat(getTemporaryPath().listFiles(), arrayWithSize(4));
            } catch (Throwable e) {
                throwables.add(e);
                SubThreadManager.killAll();
            }
        }));
        // Waiting for all thread ends
        SubThreadManager.joinAll(60000);
        for (Throwable throwable : throwables) {
            throw new RuntimeException(throwable);
        }
        assertThat(serviceCache.size(), is(0));
        assertThat(getTemporaryPath().listFiles(), arrayWithSize(4));
    }
}
From source file:org.silverpeas.core.viewer.service.ViewServiceConcurrencyDemonstrationIT.java
@Test
public void demonstrateConcurrencyManagementWithThreeDifferentConversionsAtSameTime() throws Exception {
    if (canPerformViewConversionTest()) {
        final List<Throwable> throwables = new ArrayList<>();
        final ConcurrentMap serviceCache = (ConcurrentMap) FieldUtils.readField(viewService, "cache", true);
        assertThat(serviceCache.size(), is(0));
        final int NB_VIEW_CALLS = 100;
        for (int i = 0; i < NB_VIEW_CALLS; i++) {
            SubThreadManager.addAndStart(new Thread(new Runnable() {
                @Override
                public void run() {
                    try {
                        viewService.getDocumentView(ViewerContext.from(getSimpleDocumentNamed("file.odp")));
                    } catch (Exception ignore) {
                    }
                }
            }));
            SubThreadManager.addAndStart(new Thread(new Runnable() {
                @Override
                public void run() {
                    try {
                        viewService.getDocumentView(ViewerContext.from(getSimpleDocumentNamed("file.pdf")));
                    } catch (Exception ignore) {
                    }
                }
            }));
            SubThreadManager.addAndStart(new Thread(new Runnable() {
                @Override
                public void run() {
                    try {
                        viewService.getDocumentView(ViewerContext.from(getSimpleDocumentNamed("file.odt")));
                    } catch (Exception ignore) {
                    }
                }
            }));
        }
        SubThreadManager.addAndStart(new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    Thread.sleep(500);
                    // Technical verification
                    assertThat(getTemporaryPath().listFiles(), arrayWithSize(6));
                } catch (Throwable e) {
                    throwables.add(e);
                    SubThreadManager.killAll();
                }
            }
        }));
        // Waiting for all thread ends
        SubThreadManager.joinAll(60000);
        for (Throwable throwable : throwables) {
            throw new RuntimeException(throwable);
        }
        assertThat(serviceCache.size(), is(0));
        assertThat(getTemporaryPath().listFiles(), arrayWithSize(6));
    }
}
From source file:org.silverpeas.core.viewer.service.ViewServiceConcurrencyDemonstrationTest.java
@Test
public void demonstrateConcurrencyManagement() throws Exception {
    if (canPerformViewConversionTest()) {
        final List<Throwable> throwables = new ArrayList<>();
        final ConcurrentMap serviceCache = (ConcurrentMap) FieldUtils.readField(viewService, "cache", true);
        assertThat(serviceCache.size(), is(0));
        final long startTime = System.currentTimeMillis();
        final int NB_VIEW_CALLS = 100;
        final int LAST_REQUEST_INDEX = NB_VIEW_CALLS - 1;
        final long durationToIncreaseChancesToPerformThreadBefore = 50;
        final long[] durationTimes = new long[NB_VIEW_CALLS];
        final long[] endTimes = new long[NB_VIEW_CALLS];
        final DocumentView[] results = new DocumentView[NB_VIEW_CALLS];
        for (int i = 0; i < LAST_REQUEST_INDEX; i++) {
            final int index = i;
            SubThreadManager.addAndStart(new Thread(new Runnable() {
                @Override
                public void run() {
                    try {
                        final long startThreadTime = System.currentTimeMillis();
                        final DocumentView viewFirstRequest = viewService
                                .getDocumentView(ViewerContext.from(getSimpleDocumentNamed("file.ppt")));
                        final long endThreadTime = System.currentTimeMillis();
                        durationTimes[index] = endThreadTime - startThreadTime;
                        endTimes[index] = endThreadTime;
                        results[index] = viewFirstRequest;
                    } catch (Exception e) {
                        throwables.add(e);
                        SubThreadManager.killAll();
                    }
                }
            }));
        }
        SubThreadManager.addAndStart(new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    Thread.sleep(500);
                    // Technical verification
                    assertThat("At this level, the cache has 2 elements", serviceCache.size(), is(2));
                    assertThat(getTemporaryPath().listFiles(), arrayWithSize(2));
                } catch (Throwable e) {
                    throwables.add(e);
                    SubThreadManager.killAll();
                }
            }
        }));
        Thread.sleep(durationToIncreaseChancesToPerformThreadBefore);
        final long startLastRequestTime = System.currentTimeMillis();
        final DocumentView viewLastRequest = viewService
                .getDocumentView(ViewerContext.from(getSimpleDocumentNamed("file.ppt")));
        final long endLastRequestTime = System.currentTimeMillis();
        durationTimes[LAST_REQUEST_INDEX] = endLastRequestTime - startLastRequestTime;
        endTimes[LAST_REQUEST_INDEX] = endLastRequestTime;
        results[LAST_REQUEST_INDEX] = viewLastRequest;
        // Waiting for all thread ends
        SubThreadManager.joinAll(60000);
        for (Throwable throwable : throwables) {
            throw new RuntimeException(throwable);
        }
        for (long endTime : endTimes) {
            assertThat(endTime, greaterThan(startTime));
        }
        long minEndTime = Long.MAX_VALUE;
        long maxEndTime = 0;
        for (long endTime : endTimes) {
            assertThat(endTime, greaterThan(startTime));
            minEndTime = Math.min(minEndTime, endTime);
            maxEndTime = Math.max(maxEndTime, endTime);
        }
        assertThat((maxEndTime - minEndTime), lessThan(250L));
        for (DocumentView documentView : results) {
            assertThat(documentView, notNullValue());
        }
        for (int i = 0; i < LAST_REQUEST_INDEX; i++) {
            assertThat(results[i].getPhysicalFile(), is(viewLastRequest.getPhysicalFile()));
            assertThat(results[i].getNbPages(), is(viewLastRequest.getNbPages()));
            assertThat(results[i].getWidth(), is(viewLastRequest.getWidth()));
            assertThat(results[i].getHeight(), is(viewLastRequest.getHeight()));
            assertThat(results[i].getDisplayLicenseKey(), is(viewLastRequest.getDisplayLicenseKey()));
            assertThat(results[i].getOriginalFileName(), is(viewLastRequest.getOriginalFileName()));
            assertThat(results[i].getURLAsString(), is(viewLastRequest.getURLAsString()));
            assertThat(results[i].isDocumentSplit(), is(viewLastRequest.isDocumentSplit()));
            assertThat(results[i].areSearchDataComputed(), is(viewLastRequest.areSearchDataComputed()));
        }
        assertThat(serviceCache.size(), is(0));
        assertThat(getTemporaryPath().listFiles(), arrayWithSize(2));
    }
}