List of usage examples for java.util.Collections.synchronizedSet
public static <T> Set<T> synchronizedSet(Set<T> s)
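Collections.synchronizedSet returns a thread-safe view of the given set: each individual method call is synchronized on the returned set, but iteration is not atomic and must be guarded by the caller. A minimal sketch of both points (class and element names are illustrative only, not taken from the examples below):

import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

public class SynchronizedSetSketch {
    public static void main(String[] args) {
        // Wrap a HashSet so that add/remove/contains are individually thread-safe.
        Set<String> listeners = Collections.synchronizedSet(new HashSet<String>());
        listeners.add("consumerA");
        listeners.add("consumerB");

        // Iteration is NOT atomic: hold the set's own monitor while iterating.
        synchronized (listeners) {
            for (String listener : listeners) {
                System.out.println(listener);
            }
        }
    }
}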
From source file:com.firefly.sample.castcompanionlibrary.cast.VideoCastManager.java
private VideoCastManager(Context context, String applicationId, Class<?> targetActivity, String dataNamespace) {
    super(context, applicationId);
    LOGD(TAG, "VideoCastManager is instantiated");
    mVideoConsumers = Collections.synchronizedSet(new HashSet<IVideoCastConsumer>());
    mDataNamespace = dataNamespace;
    if (null == targetActivity) {
        targetActivity = VideoCastControllerActivity.class;
    }
    mTargetActivity = targetActivity;
    Utils.saveStringToPreference(mContext, PREFS_KEY_CAST_ACTIVITY_NAME, mTargetActivity.getName());
    if (null != mDataNamespace) {
        Utils.saveStringToPreference(mContext, PREFS_KEY_CAST_CUSTOM_DATA_NAMESPACE, dataNamespace);
    }
    mMiniControllers = Collections.synchronizedSet(new HashSet<IMiniController>());
    mAudioManager = (AudioManager) context.getSystemService(Context.AUDIO_SERVICE);
    mMediaButtonReceiverComponent = new ComponentName(context, VideoIntentReceiver.class);
    mHandler = new Handler(new UpdateNotificationHandlerCallback());
}
From source file:air.com.snagfilms.cast.chromecast.VideoChromeCastManager.java
private VideoChromeCastManager(Context context, String applicationId, Class<?> targetActivity, String dataNamespace) {
    super(context, applicationId);
    mContext = context;
    Log.d(TAG, "VideoChromeCastManager is instantiated");
    mVideoConsumers = Collections.synchronizedSet(new HashSet<IVideoCastConsumer>());
    mDataNamespace = dataNamespace;
    if (null == targetActivity) {
        targetActivity = VideoPlayerActivity.class;
    }
    mTargetActivity = targetActivity;
    Utils.saveStringToPreference(context, Constants.PREFS_KEY_CAST_ACTIVITY_NAME, mTargetActivity.getName());
    mMiniControllers = Collections.synchronizedSet(new HashSet<IMiniController>());
    mAudioManager = (AudioManager) context.getSystemService(Context.AUDIO_SERVICE);
    mMediaButtonReceiverComponent = new ComponentName(context, VideoIntentReceiver.class);
    mHandler = new Handler(new UpdateNotificationHandlerCallback());
}
From source file:com.eamonndunne.londontraffic.view.ImageCache.java
/**
 * Initialize the cache, providing all parameters.
 *
 * @param cacheParams The cache parameters to initialize the cache
 */
private void init(ImageCacheParams cacheParams) {
    mCacheParams = cacheParams;

    //BEGIN_INCLUDE(init_memory_cache)
    // Set up memory cache
    if (mCacheParams.memoryCacheEnabled) {
        // If we're running on Honeycomb or newer, create a set of reusable bitmaps that can be
        // populated into the inBitmap field of BitmapFactory.Options. Note that the set is
        // of SoftReferences, which will actually not be very effective due to the garbage
        // collector aggressively clearing Soft/WeakReferences. A better approach
        // would be to use strongly referenced bitmaps; however, this would require some
        // balancing of memory usage between this set and the bitmap LruCache. It would also
        // require knowledge of the expected size of the bitmaps. From Honeycomb to JellyBean
        // the size would need to be precise; from KitKat onward the size just needs to
        // be an upper bound (due to changes in how inBitmap can re-use bitmaps).
        if (Utils.hasHoneycomb()) {
            mReusableBitmaps = Collections.synchronizedSet(new HashSet<SoftReference<Bitmap>>());
        }

        mMemoryCache = new LruCache<String, BitmapDrawable>(mCacheParams.memCacheSize) {

            /**
             * Notify the removed entry that it is no longer being cached
             */
            @Override
            protected void entryRemoved(boolean evicted, String key, BitmapDrawable oldValue,
                    BitmapDrawable newValue) {
                if (RecyclingBitmapDrawable.class.isInstance(oldValue)) {
                    // The removed entry is a recycling drawable, so notify it
                    // that it has been removed from the memory cache
                    ((RecyclingBitmapDrawable) oldValue).setIsCached(false);
                } else {
                    // The removed entry is a standard BitmapDrawable
                    if (Utils.hasHoneycomb()) {
                        // We're running on Honeycomb or later, so add the bitmap
                        // to a SoftReference set for possible use with inBitmap later
                        mReusableBitmaps.add(new SoftReference<Bitmap>(oldValue.getBitmap()));
                    }
                }
            }

            /**
             * Measure item size in kilobytes rather than units, which is more practical
             * for a bitmap cache
             */
            @Override
            protected int sizeOf(String key, BitmapDrawable value) {
                final int bitmapSize = getBitmapSize(value) / 1024;
                return bitmapSize == 0 ? 1 : bitmapSize;
            }
        };
    }
    //END_INCLUDE(init_memory_cache)

    // By default the disk cache is not initialized here as it should be initialized
    // on a separate thread due to disk access.
    if (cacheParams.initDiskCacheOnCreate) {
        // Set up disk cache
        initDiskCache();
    }
}
From source file:com.androidpi.bricks.gallery.lru.cache.ImageCache.java
/**
 * Initialize the cache, providing all parameters.
 *
 * @param cacheParams The cache parameters to initialize the cache
 */
private void init(ImageCacheParams cacheParams) {
    mCacheParams = cacheParams;

    // BEGIN_INCLUDE(init_memory_cache)
    // Set up memory cache
    if (mCacheParams.memoryCacheEnabled) {
        if (BuildConfig.DEBUG) {
            Log.d(TAG, "Memory cache created (size = " + mCacheParams.memCacheSize + ")");
        }

        // If we're running on Honeycomb or newer, create a set of reusable bitmaps that can be
        // populated into the inBitmap field of BitmapFactory.Options. Note that the set is
        // of SoftReferences, which will actually not be very effective due to the garbage
        // collector aggressively clearing Soft/WeakReferences. A better approach
        // would be to use strongly referenced bitmaps; however, this would require some
        // balancing of memory usage between this set and the bitmap LruCache. It would also
        // require knowledge of the expected size of the bitmaps. From Honeycomb to JellyBean
        // the size would need to be precise; from KitKat onward the size just needs to
        // be an upper bound (due to changes in how inBitmap can re-use bitmaps).
        if (AppUtil.hasHoneycomb()) {
            mReusableBitmaps = Collections.synchronizedSet(new HashSet<SoftReference<Bitmap>>());
        }

        mMemoryCache = new LruCache<String, BitmapDrawable>(mCacheParams.memCacheSize) {

            /**
             * Notify the removed entry that it is no longer being cached
             */
            @Override
            protected void entryRemoved(boolean evicted, String key, BitmapDrawable oldValue,
                    BitmapDrawable newValue) {
                if (RecyclingBitmapDrawable.class.isInstance(oldValue)) {
                    // The removed entry is a recycling drawable, so notify it
                    // that it has been removed from the memory cache
                    ((RecyclingBitmapDrawable) oldValue).setIsCached(false);
                } else {
                    // The removed entry is a standard BitmapDrawable
                    if (AppUtil.hasHoneycomb()) {
                        // We're running on Honeycomb or later, so add the bitmap
                        // to a SoftReference set for possible use with inBitmap later
                        mReusableBitmaps.add(new SoftReference<Bitmap>(oldValue.getBitmap()));
                    }
                }
            }

            /**
             * Measure item size in kilobytes rather than units, which is more practical
             * for a bitmap cache
             */
            @Override
            protected int sizeOf(String key, BitmapDrawable value) {
                final int bitmapSize = getBitmapSize(value) / 1024;
                return bitmapSize == 0 ? 1 : bitmapSize;
            }
        };
    }
    // END_INCLUDE(init_memory_cache)

    // By default the disk cache is not initialized here as it should be
    // initialized on a separate thread due to disk access.
    if (cacheParams.initDiskCacheOnCreate) {
        // Set up disk cache
        initDiskCache();
    }
}
From source file:org.intermine.web.logic.session.SessionMethods.java
/**
 * Record a message that will be stored in the session until it is displayed to
 * the user. This allows actions that result in a redirect to display a
 * message to the user after the redirect. Messages are stored in a Set
 * session attribute so you may call this method multiple times to display
 * multiple errors. Identical errors will be ignored.<p>
 *
 * The <code>attrib</code> parameter specifies the name of the session attribute
 * used to store the set of messages.
 *
 * @param session The Session object in which to store the message
 * @param attrib The name of the session attribute in which to store the message
 * @param message The message to store
 */
private static void recordMessage(String message, String attrib, HttpSession session) {
    Set<String> set = (Set<String>) session.getAttribute(attrib);
    if (set == null) {
        set = Collections.synchronizedSet(new LinkedHashSet<String>());
        session.setAttribute(attrib, set);
    }
    set.add(message);
}
From source file:com.smartitengineering.cms.spi.impl.content.VelocityGeneratorTest.java
@Test
public void testMultiVelocityRepGeneration() throws IOException {
    TypeRepresentationGenerator generator = new VelocityRepresentationGenerator();
    final RepresentationTemplate template = mockery.mock(RepresentationTemplate.class);
    WorkspaceAPIImpl impl = new WorkspaceAPIImpl() {
        @Override
        public RepresentationTemplate getRepresentationTemplate(WorkspaceId id, String name) {
            return template;
        }
    };
    impl.setRepresentationGenerators(Collections.singletonMap(TemplateType.VELOCITY, generator));
    final RepresentationProvider provider = new RepresentationProviderImpl();
    final WorkspaceAPI api = impl;
    registerBeanFactory(api);
    final Content content = mockery.mock(Content.class);
    final Field field = mockery.mock(Field.class);
    final FieldValue value = mockery.mock(FieldValue.class);
    final Map<String, Field> fieldMap = mockery.mock(Map.class);
    final ContentType type = mockery.mock(ContentType.class);
    final Map<String, RepresentationDef> reps = mockery.mock(Map.class, "repMap");
    final RepresentationDef def = mockery.mock(RepresentationDef.class);
    final int threadCount = new Random().nextInt(100);
    logger.info("Number of parallel threads " + threadCount);
    mockery.checking(new Expectations() {
        {
            exactly(threadCount).of(template).getTemplateType();
            will(returnValue(TemplateType.VELOCITY));
            exactly(threadCount).of(template).getTemplate();
            final byte[] toByteArray = IOUtils.toByteArray(
                    getClass().getClassLoader().getResourceAsStream("scripts/velocity/test-template.vm"));
            will(returnValue(toByteArray));
            exactly(threadCount).of(template).getName();
            will(returnValue(REP_NAME));
            for (int i = 0; i < threadCount; ++i) {
                exactly(1).of(value).getValue();
                will(returnValue(String.valueOf(i)));
            }
            exactly(threadCount).of(field).getValue();
            will(returnValue(value));
            exactly(threadCount).of(fieldMap).get(with(Expectations.<String>anything()));
            will(returnValue(field));
            exactly(threadCount).of(content).getFields();
            will(returnValue(fieldMap));
            exactly(threadCount).of(content).getContentDefinition();
            will(returnValue(type));
            final ContentId contentId = mockery.mock(ContentId.class);
            exactly(2 * threadCount).of(content).getContentId();
            will(returnValue(contentId));
            final WorkspaceId wId = mockery.mock(WorkspaceId.class);
            exactly(threadCount).of(contentId).getWorkspaceId();
            will(returnValue(wId));
            exactly(2 * threadCount).of(type).getRepresentationDefs();
            will(returnValue(reps));
            exactly(2 * threadCount).of(reps).get(with(REP_NAME));
            will(returnValue(def));
            exactly(threadCount).of(def).getParameters();
            will(returnValue(Collections.emptyMap()));
            exactly(threadCount).of(def).getMIMEType();
            will(returnValue(GroovyGeneratorTest.MIME_TYPE));
            final ResourceUri rUri = mockery.mock(ResourceUri.class);
            exactly(threadCount).of(def).getResourceUri();
            will(returnValue(rUri));
            exactly(threadCount).of(rUri).getValue();
            will(returnValue("iUri"));
        }
    });
    final Set<String> set = Collections.synchronizedSet(new LinkedHashSet<String>(threadCount));
    final List<String> list = Collections.synchronizedList(new ArrayList<String>(threadCount));
    final AtomicInteger integer = new AtomicInteger(0);
    Threads group = new Threads();
    for (int i = 0; i < threadCount; ++i) {
        group.addThread(new Thread(new Runnable() {

            public void run() {
                Representation representation = provider.getRepresentation(REP_NAME, type, content);
                Assert.assertNotNull(representation);
                Assert.assertEquals(REP_NAME, representation.getName());
                final String rep = StringUtils.newStringUtf8(representation.getRepresentation());
                list.add(rep);
                set.add(rep);
                Assert.assertEquals(GroovyGeneratorTest.MIME_TYPE, representation.getMimeType());
                integer.addAndGet(1);
            }
        }));
    }
    group.start();
    try {
        group.join();
    } catch (Exception ex) {
        logger.error(ex.getMessage(), ex);
    }
    logger.info("Generated reps list: " + list);
    logger.info("Generated reps set: " + set);
    Assert.assertEquals(threadCount, integer.get());
    Assert.assertEquals(threadCount, list.size());
    Assert.assertEquals(threadCount, set.size());
}
From source file:com.google.sample.castcompanionlibrary.cast.VideoCastManager.java
private VideoCastManager(Context context, String applicationId, Class<?> targetActivity, String dataNamespace) {
    super(context, applicationId);
    LOGD(TAG, "VideoCastManager is instantiated");
    mDataNamespace = dataNamespace;
    if (null == targetActivity) {
        targetActivity = VideoCastControllerActivity.class;
    }
    mTargetActivity = targetActivity;
    Utils.saveStringToPreference(mContext, PREFS_KEY_CAST_ACTIVITY_NAME, mTargetActivity.getName());
    if (null != mDataNamespace) {
        Utils.saveStringToPreference(mContext, PREFS_KEY_CAST_CUSTOM_DATA_NAMESPACE, dataNamespace);
    }
    mMiniControllers = Collections.synchronizedSet(new HashSet<IMiniController>());
    mAudioManager = (AudioManager) context.getSystemService(Context.AUDIO_SERVICE);
    mMediaButtonReceiverComponent = new ComponentName(context, VideoIntentReceiver.class);
}
From source file:eu.itesla_project.modules.wca.WCATool.java
@Override
public void run(CommandLine line) throws Exception {
    Path caseFile = Paths.get(line.getOptionValue("case-file"));
    String offlineWorkflowId = line.getOptionValue("offline-workflow-id"); // can be null, meaning use no offline security rules
    Interval histoInterval = Interval.parse(line.getOptionValue("history-interval"));
    String rulesDbName = line.hasOption("rules-db-name") ? line.getOptionValue("rules-db-name")
            : OfflineConfig.DEFAULT_RULES_DB_NAME;
    double purityThreshold = DEFAULT_PURITY_THRESHOLD;
    if (line.hasOption("purity-threshold")) {
        purityThreshold = Double.parseDouble(line.getOptionValue("purity-threshold"));
    }
    Set<SecurityIndexType> securityIndexTypes = null;
    if (line.hasOption("security-index-types")) {
        securityIndexTypes = Arrays.stream(line.getOptionValue("security-index-types").split(","))
                .map(SecurityIndexType::valueOf).collect(Collectors.toSet());
    }
    Path outputCsvFile = null;
    if (line.hasOption("output-csv-file")) {
        outputCsvFile = Paths.get(line.getOptionValue("output-csv-file"));
    }
    boolean stopWcaOnViolations = DEFAULT_STOP_WCA_ON_VIOLATIONS;
    if (line.hasOption("stop-on-violations")) {
        stopWcaOnViolations = Boolean.parseBoolean(line.getOptionValue("stop-on-violations"));
    }
    try (ComputationManager computationManager = new LocalComputationManager()) {
        WCAParameters parameters = new WCAParameters(histoInterval, offlineWorkflowId, securityIndexTypes,
                purityThreshold, stopWcaOnViolations);
        OnlineConfig config = OnlineConfig.load();
        ContingenciesAndActionsDatabaseClient contingenciesDb = config.getContingencyDbClientFactoryClass()
                .newInstance().create();
        LoadFlowFactory loadFlowFactory = config.getLoadFlowFactoryClass().newInstance();
        WCAFactory wcaFactory = config.getWcaFactoryClass().newInstance();
        try (HistoDbClient histoDbClient = new SynchronizedHistoDbClient(
                config.getHistoDbClientFactoryClass().newInstance().create());
                RulesDbClient rulesDbClient = config.getRulesDbClientFactoryClass().newInstance()
                        .create(rulesDbName)) {
            UncertaintiesAnalyserFactory uncertaintiesAnalyserFactory = config
                    .getUncertaintiesAnalyserFactoryClass().newInstance();
            if (Files.isRegularFile(caseFile)) {
                if (outputCsvFile != null) {
                    throw new RuntimeException(
                            "In case of single wca, only standard output pretty print is supported");
                }
                System.out.println("loading case...");
                // load the network
                Network network = Importers.loadNetwork(caseFile);
                if (network == null) {
                    throw new RuntimeException("Case '" + caseFile + "' not found");
                }
                network.getStateManager().allowStateMultiThreadAccess(true);
                WCA wca = wcaFactory.create(network, computationManager, histoDbClient, rulesDbClient,
                        uncertaintiesAnalyserFactory, contingenciesDb, loadFlowFactory);
                WCAAsyncResult result = wca.runAsync(StateManager.INITIAL_STATE_ID, parameters).join();
                Table table = new Table(3, BorderStyle.CLASSIC_WIDE);
                table.addCell("Contingency");
                table.addCell("Cluster");
                table.addCell("Causes");
                List<CompletableFuture<WCACluster>> futureClusters = new LinkedList<>(result.getClusters());
                while (futureClusters.size() > 0) {
                    CompletableFuture
                            .anyOf(futureClusters.toArray(new CompletableFuture[futureClusters.size()])).join();
                    for (Iterator<CompletableFuture<WCACluster>> it = futureClusters.iterator(); it.hasNext();) {
                        CompletableFuture<WCACluster> futureCluster = it.next();
                        if (futureCluster.isDone()) {
                            it.remove();
                            WCACluster cluster = futureCluster.get();
                            if (cluster != null) {
                                System.out.println("contingency " + cluster.getContingency().getId() + " done: "
                                        + cluster.getNum() + " (" + cluster.getOrigin() + ")");
                                table.addCell(cluster.getContingency().getId());
                                table.addCell(cluster.getNum() + " (" + cluster.getOrigin() + ")");
                                List<String> sortedCauses = cluster.getCauses().stream().sorted()
                                        .collect(Collectors.toList());
                                if (sortedCauses != null && sortedCauses.size() > 0) {
                                    table.addCell(sortedCauses.get(0));
                                    for (int i = 1; i < sortedCauses.size(); i++) {
                                        table.addCell("");
                                        table.addCell("");
                                        table.addCell(sortedCauses.get(i));
                                    }
                                } else {
                                    table.addCell("");
                                }
                            }
                        }
                    }
                }
                System.out.println(table.render());
            } else if (Files.isDirectory(caseFile)) {
                if (outputCsvFile == null) {
                    throw new RuntimeException(
                            "In case of multiple wca, you have to specify an output csv file");
                }
                Map<String, Map<String, WCACluster>> clusterPerContingencyPerBaseCase = Collections
                        .synchronizedMap(new TreeMap<>());
                Set<String> contingencyIds = Collections.synchronizedSet(new TreeSet<>());
                Importers.loadNetworks(caseFile, true, network -> {
                    try {
                        network.getStateManager().allowStateMultiThreadAccess(true);
                        String baseStateId = network.getId();
                        network.getStateManager().cloneState(StateManager.INITIAL_STATE_ID, baseStateId);
                        network.getStateManager().setWorkingState(baseStateId);
                        WCA wca = wcaFactory.create(network, computationManager, histoDbClient, rulesDbClient,
                                uncertaintiesAnalyserFactory, contingenciesDb, loadFlowFactory);
                        WCAAsyncResult result = wca.runAsync(baseStateId, parameters).join();
                        Map<String, WCACluster> clusterPerContingency = new HashMap<>();
                        List<CompletableFuture<WCACluster>> futureClusters = new LinkedList<>(result.getClusters());
                        while (futureClusters.size() > 0) {
                            CompletableFuture
                                    .anyOf(futureClusters.toArray(new CompletableFuture[futureClusters.size()]))
                                    .join();
                            for (Iterator<CompletableFuture<WCACluster>> it = futureClusters.iterator(); it.hasNext();) {
                                CompletableFuture<WCACluster> futureCluster = it.next();
                                if (futureCluster.isDone()) {
                                    it.remove();
                                    WCACluster cluster = futureCluster.get();
                                    if (cluster != null) {
                                        System.out.println("case " + network.getId() + ", contingency "
                                                + cluster.getContingency().getId() + " done: " + cluster.getNum()
                                                + " (" + cluster.getOrigin() + ")");
                                        clusterPerContingency.put(cluster.getContingency().getId(), cluster);
                                        contingencyIds.add(cluster.getContingency().getId());
                                    }
                                }
                            }
                        }
                        clusterPerContingencyPerBaseCase.put(network.getId(), clusterPerContingency);
                    } catch (Exception e) {
                        LOGGER.error(e.toString(), e);
                    }
                }, dataSource -> System.out.println("loading case " + dataSource.getBaseName() + "..."));
                writeClustersCsv(clusterPerContingencyPerBaseCase, contingencyIds, outputCsvFile);
            }
        }
    }
}
From source file:org.jpac.JPac.java
protected JPac() {
    super();
    setName(getClass().getSimpleName());
    tracePoint = 0;
    minRemainingCycleTime = Long.MAX_VALUE;
    maxRemainingCycleTime = 0;
    expectedCycleEndTime = 0;
    cycleStartTime = 0;
    // nextCycleStartTime = 0;
    status = Status.initializing;
    cycleNumber = 0;
    awaitedEventList = Collections.synchronizedSet(new HashSet<Fireable>());
    awaitedSimEventList = Collections.synchronizedSet(new HashSet<Fireable>());
    firedEventList = new HashSet<Fireable>();
    readyToShutdown = false;
    emergencyStopRequested = false;
    emergencyStopActive = false;
    emergencyStopIsToBeThrown = false;
    emergencyStopCausedBy = null;
    synchronizedTasks = Collections.synchronizedList(new ArrayList<Runnable>());
    cyclicTasks = Collections.synchronizedList(new ArrayList<CyclicTask>());
    startCycle = new Semaphore(1);
    cycleEnded = new Semaphore(1);
    startCycling = new Synchronisation();
    stopCycling = new Synchronisation();
    shutdownRequest = new Synchronisation();
    running = false;
    activeEventsLock = new CountingLock();
    awaitedEventOfLastModule = null;
    moduleList = new ArrayList<AbstractModule>(20);
    traceQueue = null;
    cycleHistogramm = null;
    systemHistogramm = null;
    modulesHistogramm = null;
    processedModule = null;
    exitCode = 0;
    incrementCounter = 0;
    decrementCounter = 0;
    try {
        propCycleTime = new LongProperty(this, "CycleTime", DEFAULTCYCLETIME, "[ns]", true);
        propCycleTimeoutTime = new LongProperty(this, "CycleTimeoutTime", DEFAULTCYCLETIMEOUTTIME, "[ns]", true);
        propCycleMode = new StringProperty(this, "CycleMode", CycleMode.FreeRunning.toString(),
                "[OneCycle | Bound | LazyBound | FreeRunning]", true);
        propRunningInsideAnIde = new BooleanProperty(this, "RunningInsideAnIde", false,
                "will pop up a small window to close the application", true);
        propRunningInjUnitTest = new BooleanProperty(this, "RunningInjUnitTest", false,
                "helpful, if jPac is run in a jUnit test", true);
        propEnableTrace = new BooleanProperty(this, "EnableTrace", false,
                "enables tracing of the module activity", true);
        propTraceTimeMinutes = new IntProperty(this, "TraceTimeMinutes", 0,
                "used to estimate the length of the trace buffer [min]", true);
        propPauseOnBreakPoint = new BooleanProperty(this, "pauseOnBreakPoint", false,
                "cycle is paused, until all modules enter waiting state", true);
        propRemoteSignalsEnabled = new BooleanProperty(this, "RemoteSignalsEnabled", false,
                "enable connections to/from remote JPac instances", true);
        propRemoteSignalPort = new IntProperty(this, "RemoteSignalPort", 10002,
                "server port for remote signal access", true);
        propStoreHistogrammsOnShutdown = new BooleanProperty(this, "storeHistogrammsOnShutdown", false,
                "enables storing of histogramm data on shutdown", true);
        propHistogrammFile = new StringProperty(this, "HistogrammFile", "./data/histogramm.csv",
                "file in which the histogramms are stored", true);
        propCyclicTaskShutdownTimeoutTime = new LongProperty(this, "CyclicTaskShutdownTimeoutTime",
                DEFAULTCYCLICTASKSHUTDOWNTIMEOUTTIME, "Timeout for all cyclic tasks to stop on shutdown [ns]", true);
        instanceIdentifier = InetAddress.getLocalHost().getHostName() + ":" + propRemoteSignalPort.get();
        cycleTime = propCycleTime.get();
        cycleTimeoutTime = propCycleTimeoutTime.get();
        cycleMode = CycleMode.valueOf(propCycleMode.get());
        runningInsideAnIde = propRunningInsideAnIde.get();
        runningInjUnitTest = propRunningInjUnitTest.get();
        enableTrace = propEnableTrace.get();
        traceTimeMinutes = propTraceTimeMinutes.get();
        pauseOnBreakPoint = propPauseOnBreakPoint.get();
        remoteSignalsEnabled = propRemoteSignalsEnabled.get();
        remoteSignalPort = propRemoteSignalPort.get();
        storeHistogrammsOnShutdown = propStoreHistogrammsOnShutdown.get();
        histogrammFile = propHistogrammFile.get();
        cyclicTaskShutdownTimeoutTime = propCyclicTaskShutdownTimeoutTime.get();
        // install configuration saver
        try {
            registerCyclicTask(Configuration.getInstance().getConfigurationSaver());
        } catch (WrongUseException exc) {
            /* cannot happen */
        }
    } catch (UnknownHostException ex) {
        Log.error("Error: ", ex);
        // properties cannot be initialized; kill application
        System.exit(99);
    } catch (ConfigurationException ex) {
        Log.error("Error: ", ex);
        // properties cannot be initialized; kill application
        System.exit(99);
    }
    // install a shutdown hook to handle application shutdowns
    Runtime.getRuntime().addShutdownHook(new ShutdownHook());
    setPriority(MAX_PRIORITY);
    // start instance of the automationController
    start();
}
From source file:edu.illinois.imunit.examples.apache.collections.TestBlockingBuffer.java
/**
 * Tests {@link BlockingBuffer#remove()} in combination with {@link BlockingBuffer#addAll(java.util.Collection)}
 * using multiple read threads.
 * <p/>
 * Two read threads should block on an empty buffer until a collection with two distinct objects is added, then both
 * threads should complete. Each thread should have read a different object.
 * @throws InterruptedException
 */
@Test
@Schedules({ @Schedule(name = "BlockedRemoveWithAddAll2", value = "[beforeNullRemove: afterNullRemove]@readThread1->beforeAddAll@main,"
        + "[beforeNullRemove: afterNullRemove]@readThread2->beforeAddAll@main,"
        + "afterNullRemove@readThread1->afterAddAll@main,"
        + "afterNullRemove@readThread2->afterAddAll@main") })
public void testBlockedRemoveWithAddAll2() throws InterruptedException {
    Buffer blockingBuffer = BlockingBuffer.decorate(new MyBuffer());
    Object obj1 = new Object();
    Object obj2 = new Object();
    Set objs = Collections.synchronizedSet(new HashSet());
    objs.add(obj1);
    objs.add(obj2);

    // run methods will remove and compare -- must wait for addAll
    Thread thread1 = new ReadThread(blockingBuffer, objs, "remove", "BlockedRemoveWithAddAll2", "readThread1");
    Thread thread2 = new ReadThread(blockingBuffer, objs, "remove", "BlockedRemoveWithAddAll2", "readThread2");
    thread1.start();
    thread2.start();

    // give hungry read threads ample time to hang
    try {
        // Thread.sleep(100);
    } catch (Exception e) {
        e.printStackTrace();
    }
    fireEvent("beforeAddAll");
    blockingBuffer.addAll(objs);

    // allow notified threads to complete
    try {
        // Thread.sleep(100);
    } catch (Exception e) {
        e.printStackTrace();
    }
    fireEvent("afterAddAll");
    assertEquals("BlockedRemoveWithAddAll2", 0, objs.size());

    // There should not be any threads waiting.
    thread1.join();
    thread2.join();
    //assertFalse( "BlockedRemoveWithAddAll1", thread1.isAlive() || thread2.isAlive() );
    //if( thread1.isAlive() || thread2.isAlive() ) {
    //    fail( "Live thread(s) when both should be dead." );
    //}
}