List of usage examples for java.util.Collections.synchronizedList
public static <T> List<T> synchronizedList(List<T> list)
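Before the project examples below, here is a minimal, self-contained sketch (class and variable names are illustrative, not from any of the projects) of the two points the javadoc stresses: individual operations on the returned wrapper are synchronized on the list itself, but traversal is not atomic and must be guarded by manually synchronizing on the returned list.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class SynchronizedListDemo {
    public static void main(String[] args) throws InterruptedException {
        // Wrap an ArrayList so individual calls (add, get, size, ...) are
        // synchronized on the returned list.
        List<Integer> list = Collections.synchronizedList(new ArrayList<>());

        // Two threads may add concurrently without external locking.
        Thread t1 = new Thread(() -> { for (int i = 0; i < 1000; i++) list.add(i); });
        Thread t2 = new Thread(() -> { for (int i = 0; i < 1000; i++) list.add(i); });
        t1.start();
        t2.start();
        t1.join();
        t2.join();
        System.out.println(list.size()); // 2000

        // Iteration is NOT atomic: hold the list's monitor while traversing
        // whenever other threads could still be modifying it.
        int sum = 0;
        synchronized (list) {
            for (int i : list) {
                sum += i;
            }
        }
        System.out.println(sum);
    }
}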
From source file:org.apache.accumulo.core.file.rfile.MultiThreadedRFileTest.java
@SuppressFBWarnings(value = "INFORMATION_EXPOSURE_THROUGH_AN_ERROR_MESSAGE",
    justification = "information put into error message is safe and used for testing")
@Test
public void testMultipleReaders() throws IOException {
    final List<Throwable> threadExceptions = Collections.synchronizedList(new ArrayList<Throwable>());
    Map<String, MutableInt> messages = new HashMap<>();
    Map<String, String> stackTrace = new HashMap<>();

    final TestRFile trfBase = new TestRFile(conf);

    writeData(trfBase);
    trfBase.openReader();

    try {
        validate(trfBase);

        final TestRFile trfBaseCopy = trfBase.deepCopy();
        validate(trfBaseCopy);

        // now start up multiple RFile deepcopies
        int maxThreads = 10;
        String name = "MultiThreadedRFileTestThread";
        ThreadPoolExecutor pool = new ThreadPoolExecutor(maxThreads + 1, maxThreads + 1, 5 * 60,
            TimeUnit.SECONDS, new LinkedBlockingQueue<>(), new NamingThreadFactory(name));
        pool.allowCoreThreadTimeOut(true);
        try {
            Runnable runnable = () -> {
                try {
                    TestRFile trf = trfBase;
                    synchronized (trfBaseCopy) {
                        trf = trfBaseCopy.deepCopy();
                    }
                    validate(trf);
                } catch (Throwable t) {
                    threadExceptions.add(t);
                }
            };
            for (int i = 0; i < maxThreads; i++) {
                pool.submit(runnable);
            }
        } finally {
            pool.shutdown();
            try {
                pool.awaitTermination(Long.MAX_VALUE, TimeUnit.MILLISECONDS);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }

        for (Throwable t : threadExceptions) {
            String msg = t.getClass() + " : " + t.getMessage();
            if (!messages.containsKey(msg)) {
                messages.put(msg, new MutableInt(1));
            } else {
                messages.get(msg).increment();
            }
            StringWriter string = new StringWriter();
            PrintWriter writer = new PrintWriter(string);
            t.printStackTrace(writer);
            writer.flush();
            stackTrace.put(msg, string.getBuffer().toString());
        }
    } finally {
        trfBase.closeReader();
        trfBase.close();
    }

    for (String message : messages.keySet()) {
        LOG.error(messages.get(message) + ": " + message);
        LOG.error(stackTrace.get(message));
    }

    assertTrue(threadExceptions.isEmpty());
}
From source file:com.vmware.photon.controller.cloudstore.xenon.entity.SchedulingConstantGeneratorTest.java
/**
 * Test for distinct scheduling constants, creating hosts concurrently on a
 * single Xenon host.
 */
@Test(dataProvider = "HostCounts")
public void testDistinctSchedulingConstantsConcurrent(int hostCount) throws Throwable {
    List<Long> schedulingConstants = Collections.synchronizedList(new ArrayList<>());
    TestEnvironment env = TestEnvironment.create(1);
    List<Thread> threads = new ArrayList<>();
    ServiceHost xenonHost = env.getHosts()[0];

    IntStream.range(0, THREADS).forEach((threadId) -> {
        Thread t = new Thread(() -> {
            List<Long> thisThreadSchedulingConstants = createHosts(xenonHost, hostCount);
            schedulingConstants.addAll(thisThreadSchedulingConstants);
        });
        t.start();
        threads.add(t);
    });

    for (Thread t : threads) {
        t.join();
    }

    env.stop();

    assertThat(schedulingConstants.size(), equalTo(hostCount * THREADS));

    // Check that all scheduling constants are distinct (see note in
    // testDistinctSchedulingConstantsSerial)
    Set<Long> schedulingConstantsSet = new HashSet<>();
    schedulingConstantsSet.addAll(schedulingConstants);
    assertThat(schedulingConstantsSet.size(), equalTo(schedulingConstants.size()));
}
From source file:org.springframework.integration.jms.SubscribableJmsChannelTests.java
@Test
public void topicName() throws Exception {
    final CountDownLatch latch = new CountDownLatch(4);
    final List<Message<?>> receivedList1 = Collections.synchronizedList(new ArrayList<Message<?>>());
    MessageHandler handler1 = new MessageHandler() {
        public void handleMessage(Message<?> message) {
            receivedList1.add(message);
            latch.countDown();
        }
    };
    final List<Message<?>> receivedList2 = Collections.synchronizedList(new ArrayList<Message<?>>());
    MessageHandler handler2 = new MessageHandler() {
        public void handleMessage(Message<?> message) {
            receivedList2.add(message);
            latch.countDown();
        }
    };
    JmsChannelFactoryBean factoryBean = new JmsChannelFactoryBean(true);
    factoryBean.setConnectionFactory(this.connectionFactory);
    factoryBean.setDestinationName("dynamicTopic");
    factoryBean.setPubSubDomain(true);
    factoryBean.afterPropertiesSet();
    SubscribableJmsChannel channel = (SubscribableJmsChannel) factoryBean.getObject();
    channel.afterPropertiesSet();
    channel.start();
    if (!waitUntilRegisteredWithDestination(channel, 10000)) {
        fail("Listener failed to subscribe to topic");
    }
    channel.subscribe(handler1);
    channel.subscribe(handler2);
    channel.send(new GenericMessage<String>("foo"));
    channel.send(new GenericMessage<String>("bar"));
    latch.await(TIMEOUT, TimeUnit.MILLISECONDS);
    assertEquals(2, receivedList1.size());
    assertEquals("foo", receivedList1.get(0).getPayload());
    assertEquals("bar", receivedList1.get(1).getPayload());
    assertEquals(2, receivedList2.size());
    assertEquals("foo", receivedList2.get(0).getPayload());
    assertEquals("bar", receivedList2.get(1).getPayload());
    channel.stop();
}
From source file:org.apache.edgent.samples.apps.sensorAnalytics.Sensor1.java
/**
 * Periodically publish the lastN on a stream.
 * @param stream tuples to
 * @param count sliding window size "lastN"
 * @param nSec publish frequency
 * @param event sensor's publish event label
 */
private void periodicallyPublishLastNInfo(TStream<JsonObject> stream, int count, int nSec, String event) {

    // Demonstrate periodic publishing of a sliding window if
    // something changed since it was last published.

    // Maintain a sliding window of the last N tuples.
    // TODO today, windows don't provide "anytime" access to their collection
    // so maintain our own current copy of the collection that we can
    // access it when needed.
    List<JsonObject> lastN = Collections.synchronizedList(new ArrayList<>());
    stream.last(count, JsonTuples.keyFn()).aggregate((samples, key) -> samples).tag(event + ".lastN")
            .sink(samples -> {
                // Capture the new list/window.
                synchronized (lastN) {
                    lastN.clear();
                    lastN.addAll(samples);
                }
            });

    // Publish the lastN (with trimmed down info) every nSec seconds
    // if anything changed since the last publish.
    TStream<JsonObject> periodicLastN = t.poll(() -> 1, nSec, TimeUnit.SECONDS).tag(event + ".trigger")
            .filter(trigger -> !lastN.isEmpty()).tag(event + ".changed").map(trigger -> {
                synchronized (lastN) {
                    // create a single JsonObject with the list
                    // of reduced-content samples
                    JsonObject jo = new JsonObject();
                    jo.addProperty(KEY_ID, sensorId);
                    jo.addProperty(KEY_TS, System.currentTimeMillis());
                    jo.addProperty("window", count);
                    jo.addProperty("pubFreqSec", nSec);
                    JsonArray ja = new JsonArray();
                    jo.add("lastN", ja);
                    for (JsonObject j : lastN) {
                        JsonObject jo2 = new JsonObject();
                        ja.add(jo2);
                        jo2.add(KEY_TS, j.get(KEY_TS));
                        // reduce size: include only 2 significant digits
                        jo2.addProperty(KEY_READING,
                                String.format("%.2f", JsonTuples.getStatistic(j, MEAN).getAsDouble()));
                    }
                    lastN.clear();
                    return jo;
                }
            }).tag(event);

    traceStream(periodicLastN, event);

    // Use a pressureReliever to prevent backpressure if the broker
    // can't be contacted.
    // TODO enhance MqttDevice with configurable reliever.
    app.mqttDevice().events(
            PlumbingStreams.pressureReliever(periodicLastN, tuple -> 0, 30).tag(event + ".pressureRelieved"),
            app.sensorEventId(sensorId, event), QoS.FIRE_AND_FORGET);
}
From source file:org.apache.hadoop.hbase.security.TestSecureIPC.java
/**
 * Sets up an RPC server and a client, does an RPC, and checks the result. If an exception is
 * thrown from the stub, this function will throw the root cause of that exception.
 */
private void callRpcService(User clientUser) throws Exception {
    SecurityInfo securityInfoMock = Mockito.mock(SecurityInfo.class);
    Mockito.when(securityInfoMock.getServerPrincipal()).thenReturn(HBaseKerberosUtils.KRB_PRINCIPAL);
    SecurityInfo.addInfo("TestProtobufRpcProto", securityInfoMock);

    InetSocketAddress isa = new InetSocketAddress(HOST, 0);

    RpcServerInterface rpcServer = new RpcServer(null, "AbstractTestSecureIPC",
        Lists.newArrayList(new RpcServer.BlockingServiceAndInterface((BlockingService) SERVICE, null)), isa,
        serverConf, new FifoRpcScheduler(serverConf, 1));
    rpcServer.start();
    try (RpcClient rpcClient = RpcClientFactory.createClient(clientConf,
            HConstants.DEFAULT_CLUSTER_ID.toString())) {
        BlockingInterface stub = newBlockingStub(rpcClient, rpcServer.getListenerAddress(), clientUser);
        TestThread th1 = new TestThread(stub);
        final Throwable exception[] = new Throwable[1];
        Collections.synchronizedList(new ArrayList<Throwable>());
        Thread.UncaughtExceptionHandler exceptionHandler = new Thread.UncaughtExceptionHandler() {
            public void uncaughtException(Thread th, Throwable ex) {
                exception[0] = ex;
            }
        };
        th1.setUncaughtExceptionHandler(exceptionHandler);
        th1.start();
        th1.join();
        if (exception[0] != null) {
            // throw root cause.
            while (exception[0].getCause() != null) {
                exception[0] = exception[0].getCause();
            }
            throw (Exception) exception[0];
        }
    } finally {
        rpcServer.stop();
    }
}
From source file:com.gemini.provision.loadbalancer.openstack.LoadBalancerProviderOpenStackImpl.java
@Override
public List<GeminiLoadBalancerPool> listAllPools(GeminiTenant tenant, GeminiEnvironment env) {
    List<GeminiLoadBalancerPool> lbPools = Collections.synchronizedList(new ArrayList());

    //authenticate the session with the OpenStack installation
    OSClient os = OSFactory.builder().endpoint(env.getEndPoint())
            .credentials(env.getAdminUserName(), env.getAdminPassword()).tenantName(tenant.getName())
            .authenticate();
    if (os == null) {
        Logger.error("Failed to authenticate Tenant: {}",
                ToStringBuilder.reflectionToString(tenant, ToStringStyle.MULTI_LINE_STYLE));
        return null;
    }

    List<? extends LbPool> osLbPools = os.networking().loadbalancers().lbPool().list();
    osLbPools.stream().filter(lbPool -> lbPool != null).forEach(lbPool -> {
        GeminiLoadBalancerPool loadBalancerPool = new GeminiLoadBalancerPool();
        loadBalancerPool.setCloudID(lbPool.getId());
        loadBalancerPool.setName(lbPool.getName());
        loadBalancerPool.setDescription(lbPool.getDescription());
        //TODO get the VpId from the pool
        loadBalancerPool.setVipID(lbPool.getVipId());
        loadBalancerPool.setProtocol(Protocol.fromString(lbPool.getProtocol()));
        loadBalancerPool.setLoadBalancerAlgorithm(LoadBalancerAlgorithm.fromString(lbPool.getLbMethod()));
        //TODO set the pool member
        loadBalancerPool.setAdminState(lbPool.isAdminStateUp() ? AdminState.ADMIN_UP : AdminState.ADMIN_DOWN);

        GeminiSubnet subnet = env.getApplications().stream().map(GeminiApplication::getNetworks)
                .flatMap(List::stream).map(GeminiNetwork::getSubnets).flatMap(List::stream)
                .filter(s -> s.getCloudID().equals(lbPool.getId())).findFirst().get();
        if (subnet == null) {
            Logger.info(
                    "Load Balancer cloud ID {} references a subnet not available in environment {} Subnet ID: {}",
                    lbPool.getId(), env.getName(), lbPool.getSubnetId());
        } else {
            loadBalancerPool.setGeminiSubnet(subnet);
        }
        lbPools.add(loadBalancerPool);
    });
    return lbPools;
}
From source file:org.eclipse.hudson.init.InitialSetup.java
public HttpResponse doinstallPlugin(@QueryParameter String pluginName) {
    if (!hudsonSecurityManager.hasPermission(Permission.HUDSON_ADMINISTER)) {
        return HttpResponses.forbidden();
    }
    AvailablePluginInfo plugin = updateSiteManager.getAvailablePlugin(pluginName);
    try {
        PluginInstallationJob installJob = null;
        // If the plugin is already being installed, don't schedule another. Make the search thread safe
        List<PluginInstallationJob> jobs = Collections.synchronizedList(installationsJobs);
        synchronized (jobs) {
            for (PluginInstallationJob job : jobs) {
                if (job.getName().equals(pluginName)) {
                    installJob = job;
                }
            }
        }
        // No previous install of the plugin, create new
        if (installJob == null) {
            Future<PluginInstallationJob> newJob = install(plugin);
            installJob = newJob.get();
        }
        if (!installJob.getStatus()) {
            return new ErrorHttpResponse(
                    "Plugin " + pluginName + " could not be installed. " + installJob.getErrorMsg());
        }
    } catch (Exception ex) {
        return new ErrorHttpResponse(
                "Plugin " + pluginName + " could not be installed. " + ex.getLocalizedMessage());
    }
    reCheck();
    return HttpResponses.ok();
}
From source file:org.opoo.press.impl.SiteImpl.java
void reset() {
    this.time = config.get("time", new Date());
    this.pages = new ArrayList<Page>();
    this.posts = new ArrayList<Post>();
    //Call #add() in multi-threading
    this.staticFiles = Collections.synchronizedList(new ArrayList<StaticFile>());
    // resetCategories();
    // resetTags();
}
From source file:com.github.drbookings.ui.controller.UpcomingController.java
private void addEvents(final LocalDate date, final Collection<BookingEntry> upcomingBookings,
        final Collection<CleaningEntry> upcomingCleanings) {
    final VBox box = new VBox(4);
    if (date.equals(LocalDate.now())) {
        box.getStyleClass().add("first-day");
    } else if (date.equals(LocalDate.now().plusDays(1))) {
        box.getStyleClass().add("second-day");
    } else if (date.isAfter(LocalDate.now().plusDays(1))) {
        box.getStyleClass().add("later");
    }
    if (upcomingBookings.stream().filter(b -> b.isCheckIn() || b.isCheckOut()).collect(Collectors.toList())
            .isEmpty() && upcomingCleanings.isEmpty()) {
        final Text t0 = new Text(getDateString(date));
        final Text t1 = new Text(" there are no events.");
        t0.getStyleClass().add("emphasis");
        final TextFlow tf = new TextFlow();
        tf.getChildren().addAll(t0, t1);
        box.getChildren().addAll(tf);
    } else {
        final List<CheckInOutDetails> checkInNotes = Collections.synchronizedList(new ArrayList<>());
        final List<CheckInOutDetails> checkOutNotes = Collections.synchronizedList(new ArrayList<>());
        upcomingBookings.forEach(b -> {
            if (b.isCheckIn()) {
                String note = "";
                if (b.getElement().getCheckInNote() != null) {
                    note = b.getElement().getCheckInNote();
                }
                if (b.getElement().getSpecialRequestNote() != null) {
                    note = note + "\n" + b.getElement().getSpecialRequestNote();
                }
                checkInNotes.add(new CheckInOutDetails(b.getRoom().getName(),
                        b.getElement().getBookingOrigin().getName(), note));
            } else if (b.isCheckOut()) {
                checkOutNotes.add(new CheckInOutDetails(b.getRoom().getName(),
                        b.getElement().getBookingOrigin().getName(), b.getElement().getCheckOutNote()));
            }
        });
        Collections.sort(checkInNotes);
        Collections.sort(checkOutNotes);
        addGeneralSummary(date, box, checkInNotes);
        addCheckOutSummary(date, box, checkOutNotes);
        addCheckOutNotes(date, box, checkOutNotes);
        addCheckInSummary(date, box, checkInNotes);
        addCheckInNotes(date, box, checkInNotes);
        addCleaningSummary(date, box, upcomingCleanings);
        addCleanings(date, box, upcomingCleanings);
    }
    this.box.getChildren().add(box);
}
From source file:org.domainmath.gui.update.UpdateFrame.java
private List<File> getFileList(File datafile) {
    List<File> data = Collections.synchronizedList(new ArrayList());
    String line;

    try {
        FileInputStream fin = new FileInputStream(datafile);
        BufferedReader br = new BufferedReader(new InputStreamReader(fin));
        try {
            while ((line = br.readLine()) != null) {
                StringTokenizer s2 = new StringTokenizer(line, "\n");
                while (s2.hasMoreTokens()) {
                    data.add(new File(System.getProperty("user.dir") + File.separator + s2.nextToken()));
                }
            }
            br.close();
        } catch (IOException ex) {
            dispose();
        }
    } catch (FileNotFoundException ex) {
        String error = "You can not continue update process,you"
                + "are using older version of DomainMath IDE.You can download latest"
                + "version from Internet.";
        new Error(this, "Unable to update.", error);
        dispose();
    }
    return data;
}