Usage examples for java.util.Collections.synchronizedList
public static <T> List<T> synchronizedList(List<T> list)
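The returned wrapper makes individual calls such as add, get, and size thread-safe, but per the method's contract the caller must still synchronize on the returned list while iterating over it. Before the project examples below, a minimal sketch of that pattern (the class name and the single writer thread are illustrative, not taken from any of the projects):

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class SynchronizedListSketch {
    public static void main(String[] args) throws InterruptedException {
        // Wrap a plain ArrayList; individual calls (add, get, size) are now thread-safe.
        final List<String> events = Collections.synchronizedList(new ArrayList<String>());

        // Writers may call add() concurrently without external locking.
        Thread writer = new Thread(() -> events.add("event"));
        writer.start();
        writer.join();

        // Iteration is NOT atomic: the caller must synchronize on the list itself.
        synchronized (events) {
            for (String e : events) {
                System.out.println(e);
            }
        }
    }
}

Most of the examples below follow exactly this shape: worker threads add concurrently, and any bulk read or iteration happens inside a synchronized block.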
From source file:org.piwik.sdk.TestDispatcher.java
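This test floods a tracker from ten threads while a background thread randomizes the dispatch interval; the events created by all threads are collected in a synchronized list and checked against the dry-run output.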
@Test
public void testRandomDispatchIntervals() throws Exception {
    final Tracker tracker = createTracker();
    final int threadCount = 10;
    final int queryCount = 100;
    final List<String> createdEvents = Collections.synchronizedList(new ArrayList<String>());
    new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                while (getFlattenedQueries(new ArrayList<>(tracker.getDispatcher().getDryRunOutput()))
                        .size() != threadCount * queryCount)
                    tracker.setDispatchInterval(new Random().nextInt(20 - -1) + -1);
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    }).start();
    launchTestThreads(tracker, threadCount, queryCount, createdEvents);
    checkForMIAs(threadCount * queryCount, createdEvents, tracker.getDispatcher().getDryRunOutput());
}
From source file:de.codecentric.batch.jsr352.CustomJsrJobOperator.java
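This JSR-352 job operator runs the job on a task executor and uses a synchronized list as a cross-thread holder, so an exception thrown inside the worker can be rethrown as a JobStartException from the calling thread.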
@Override
public long start(String jobName, Properties params) throws JobStartException, JobSecurityException {
    final JsrXmlApplicationContext batchContext = new JsrXmlApplicationContext(params);
    batchContext.setValidating(false);
    Resource batchXml = new ClassPathResource("/META-INF/batch.xml");
    String jobConfigurationLocation = "/META-INF/batch-jobs/" + jobName + ".xml";
    Resource jobXml = new ClassPathResource(jobConfigurationLocation);
    if (batchXml.exists()) {
        batchContext.load(batchXml);
    }
    if (jobXml.exists()) {
        batchContext.load(jobXml);
    }
    AbstractBeanDefinition beanDefinition = BeanDefinitionBuilder
            .genericBeanDefinition("org.springframework.batch.core.jsr.JsrJobContextFactoryBean")
            .getBeanDefinition();
    beanDefinition.setScope(BeanDefinition.SCOPE_SINGLETON);
    batchContext.registerBeanDefinition(JSR_JOB_CONTEXT_BEAN_NAME, beanDefinition);
    batchContext.setParent(parentContext);
    try {
        batchContext.refresh();
    } catch (BeanCreationException e) {
        throw new JobStartException(e);
    }
    Assert.notNull(jobName, "The job name must not be null.");
    final org.springframework.batch.core.JobExecution jobExecution;
    try {
        JobParameters jobParameters = jobParametersConverter.getJobParameters(params);
        String[] jobNames = batchContext.getBeanNamesForType(Job.class);
        if (jobNames == null || jobNames.length <= 0) {
            throw new BatchRuntimeException("No Job defined in current context");
        }
        org.springframework.batch.core.JobInstance jobInstance = jobRepository.createJobInstance(jobNames[0],
                jobParameters);
        jobExecution = jobRepository.createJobExecution(jobInstance, jobParameters, jobConfigurationLocation);
    } catch (Exception e) {
        throw new JobStartException(e);
    }
    try {
        final Semaphore semaphore = new Semaphore(1);
        final List<Exception> exceptionHolder = Collections.synchronizedList(new ArrayList<Exception>());
        semaphore.acquire();
        taskExecutor.execute(new Runnable() {
            @Override
            public void run() {
                JsrJobContextFactoryBean factoryBean = null;
                try {
                    factoryBean = (JsrJobContextFactoryBean) batchContext
                            .getBean("&" + JSR_JOB_CONTEXT_BEAN_NAME);
                    factoryBean.setJobExecution(jobExecution);
                    final AbstractJob job = batchContext.getBean(AbstractJob.class);
                    addListenerToJobService.addListenerToJob(job);
                    semaphore.release(); // Initialization of the JobExecution for job level dependencies
                    jobRegistry.register(job, jobExecution);
                    job.execute(jobExecution);
                    jobRegistry.remove(jobExecution);
                } catch (Exception e) {
                    exceptionHolder.add(e);
                } finally {
                    if (factoryBean != null) {
                        factoryBean.close();
                    }
                    batchContext.close();
                    if (semaphore.availablePermits() == 0) {
                        semaphore.release();
                    }
                }
            }
        });
        semaphore.acquire();
        if (exceptionHolder.size() > 0) {
            semaphore.release();
            throw new JobStartException(exceptionHolder.get(0));
        }
    } catch (Exception e) {
        if (jobRegistry.exists(jobExecution.getId())) {
            jobRegistry.remove(jobExecution);
        }
        jobExecution.upgradeStatus(BatchStatus.FAILED);
        if (jobExecution.getExitStatus().equals(ExitStatus.UNKNOWN)) {
            jobExecution.setExitStatus(ExitStatus.FAILED.addExitDescription(e));
        }
        jobRepository.update(jobExecution);
        if (batchContext.isActive()) {
            batchContext.close();
        }
        throw new JobStartException(e);
    }
    return jobExecution.getId();
}
From source file:com.streamsets.pipeline.stage.origin.spooldir.TestWholeFileSpoolDirSource.java
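This test collects records produced by an asynchronous push-source runner into a synchronized list (guarding the bulk addAll with an explicit synchronized block) and then verifies the whole-file record it received.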
@Test
public void testWholeFileRecordsCopy() throws Exception {
    Path sourcePath = Paths.get(testDir + "/source.txt");
    Files.write(sourcePath, "Sample Text 1".getBytes());
    SpoolDirSource source = createSource();
    PushSourceRunner runner = new PushSourceRunner.Builder(SpoolDirDSource.class, source)
            .addOutputLane("lane")
            .setOnRecordError(OnRecordError.TO_ERROR)
            .build();
    final List<Record> records = Collections.synchronizedList(new ArrayList<>(10));
    AtomicInteger batchCount = new AtomicInteger(0);
    runner.runInit();
    try {
        runner.runProduce(new HashMap<>(), 10, output2 -> {
            synchronized (records) {
                records.addAll(output2.getRecords().get("lane"));
            }
            batchCount.incrementAndGet();
            runner.setStop();
        });
        runner.waitOnProduce();
        Assert.assertNotNull(records);
        Assert.assertEquals(1, records.size());
        Record record = records.get(0);
        Assert.assertTrue(record.has(FileRefUtil.FILE_INFO_FIELD_PATH));
        Assert.assertTrue(record.has(FileRefUtil.FILE_REF_FIELD_PATH));
        FileRef fileRef = record.get(FileRefUtil.FILE_REF_FIELD_PATH).getValueAsFileRef();
        String targetFile = testDir + "/target.txt";
        Stage.Context context = (Stage.Context) Whitebox.getInternalState(source, "context");
        initMetrics(context);
        IOUtils.copy(fileRef.createInputStream(context, InputStream.class), new FileOutputStream(targetFile));
        // Now make sure the file is copied properly.
        checkFileContent(new FileInputStream(sourcePath.toString()), new FileInputStream(targetFile));
    } finally {
        runner.runDestroy();
    }
}
From source file:org.piwik.sdk.dispatcher.DispatcherTest.java
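This dispatcher test uses synchronized lists twice: as the dry-run target that receives outgoing packets and as the shared collector of events created by twenty concurrent threads.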
@Test
public void testMultiThreadDispatch() throws Exception {
    List<Packet> dryRunData = Collections.synchronizedList(new ArrayList<Packet>());
    mDispatcher.setDryRunTarget(dryRunData);
    mDispatcher.setDispatchInterval(20);
    final int threadCount = 20;
    final int queryCount = 100;
    final List<String> createdEvents = Collections.synchronizedList(new ArrayList<String>());
    launchTestThreads(mApiUrl, mDispatcher, threadCount, queryCount, createdEvents);
    checkForMIAs(threadCount * queryCount, createdEvents, dryRunData);
}
From source file:com.gargoylesoftware.htmlunit.javascript.host.xml.XMLHttpRequest3Test.java
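This HtmlUnit test registers a CollectingAlertHandler backed by a synchronized list, so alerts fired by background JavaScript can be gathered safely and compared with the expected values.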
/**
 * Tests asynchronous use of XMLHttpRequest, where the XHR request fails
 * due to IOException (Connection refused).
 * @throws Exception if the test fails
 */
@Test
@Alerts(DEFAULT = { "0", "1", "1", "2", "4", MSG_NO_CONTENT, MSG_PROCESSING_ERROR },
        FF = { "0", "1", "2", "4", MSG_NO_CONTENT, MSG_PROCESSING_ERROR })
@NotYetImplemented(CHROME)
public void testAsyncUseWithNetworkConnectionFailure() throws Exception {
    final String html = "<html>\n"
        + "<head>\n"
        + "<title>XMLHttpRequest Test</title>\n"
        + "<script>\n"
        + "var request;\n"
        + "function testAsync() {\n"
        + "  request = " + XMLHttpRequest2Test.XHRInstantiation_ + ";\n"
        + "  request.onreadystatechange = onReadyStateChange;\n"
        + "  request.onerror = onError;\n"
        + "  alert(request.readyState);\n"
        + "  request.open('GET', '" + URL_SECOND + "', true);\n"
        + "  request.send('');\n"
        + "}\n"
        + "function onError() {\n"
        + "  alert('" + MSG_PROCESSING_ERROR + "');\n"
        + "}\n"
        + "function onReadyStateChange() {\n"
        + "  alert(request.readyState);\n"
        + "  if (request.readyState == 4) {\n"
        + "    if (request.responseText.length == 0)\n"
        + "      alert('" + MSG_NO_CONTENT + "');"
        + "    else\n"
        + "      throw 'Unexpected content, should be zero length but is: \"' + request.responseText + '\"';\n"
        + "  }\n"
        + "}\n"
        + "</script>\n"
        + "</head>\n"
        + "<body onload='testAsync()'>\n"
        + "</body>\n"
        + "</html>";

    final WebClient client = getWebClient();
    final List<String> collectedAlerts = Collections.synchronizedList(new ArrayList<String>());
    client.setAlertHandler(new CollectingAlertHandler(collectedAlerts));

    final MockWebConnection conn = new DisconnectedMockWebConnection();
    conn.setResponse(URL_FIRST, html);
    client.setWebConnection(conn);

    client.getPage(URL_FIRST);
    assertEquals(0, client.waitForBackgroundJavaScriptStartingBefore(1000));
    assertEquals(getExpectedAlerts(), collectedAlerts);
}
From source file:org.apache.hive.spark.client.SparkSubmitSparkClient.java
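Hive's spark-submit client redirects the child process's stderr into a synchronized list, then scans it inside a synchronized block (iteration over a synchronized list is not atomic) to build an error message if the process fails.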
@Override
protected Future<Void> launchDriver(String isTesting, RpcServer rpcServer, String clientId) throws IOException {
    Callable<Void> runnable;
    String cmd = Joiner.on(" ").join(argv);
    LOG.info("Running client driver with argv: {}", cmd);
    ProcessBuilder pb = new ProcessBuilder("sh", "-c", cmd);

    // Prevent hive configurations from being visible in Spark.
    pb.environment().remove("HIVE_HOME");
    pb.environment().remove("HIVE_CONF_DIR");

    // Add credential provider password to the child process's environment.
    // In case of Spark the credential provider location is provided in the jobConf when the job is submitted.
    String password = getSparkJobCredentialProviderPassword();
    if (password != null) {
        pb.environment().put(Constants.HADOOP_CREDENTIAL_PASSWORD_ENVVAR, password);
    }
    if (isTesting != null) {
        pb.environment().put("SPARK_TESTING", isTesting);
    }

    final Process child = pb.start();
    String threadName = Thread.currentThread().getName();
    final List<String> childErrorLog = Collections.synchronizedList(new ArrayList<String>());
    final LogRedirector.LogSourceCallback callback = () -> isAlive;

    LogRedirector.redirect("spark-submit-stdout-redir-" + threadName,
            new LogRedirector(child.getInputStream(), LOG, callback));
    LogRedirector.redirect("spark-submit-stderr-redir-" + threadName,
            new LogRedirector(child.getErrorStream(), LOG, childErrorLog, callback));

    runnable = () -> {
        try {
            int exitCode = child.waitFor();
            if (exitCode != 0) {
                List<String> errorMessages = new ArrayList<>();
                synchronized (childErrorLog) {
                    for (String line : childErrorLog) {
                        if (StringUtils.containsIgnoreCase(line, "Error")) {
                            errorMessages.add("\"" + line + "\"");
                        }
                    }
                }
                String errStr = errorMessages.isEmpty() ? "?" : Joiner.on(',').join(errorMessages);
                rpcServer.cancelClient(clientId, new RuntimeException("spark-submit process failed "
                        + "with exit code " + exitCode + " and error " + errStr));
            }
        } catch (InterruptedException ie) {
            LOG.warn("Thread waiting on the child process (spark-submit) is interrupted, killing the child process.");
            rpcServer.cancelClient(clientId, "Thread waiting on the child process (spark-submit) is interrupted");
            Thread.interrupted();
            child.destroy();
        } catch (Exception e) {
            String errMsg = "Exception while waiting for child process (spark-submit)";
            LOG.warn(errMsg, e);
            rpcServer.cancelClient(clientId, errMsg);
        }
        return null;
    };

    FutureTask<Void> futureTask = new FutureTask<>(runnable);
    Thread driverThread = new Thread(futureTask);
    driverThread.setDaemon(true);
    driverThread.setName("SparkSubmitMonitor");
    driverThread.start();
    return futureTask;
}
From source file:com.vmware.photon.controller.cloudstore.xenon.entity.SchedulingConstantGeneratorTest.java
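This test starts one thread per Xenon host, each appending the scheduling constants it generates to a shared synchronized list, which is checked for even distribution after all threads join.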
/**
 * Test distribution of scheduling constants, creating hosts on multiple Xenon
 * hosts, one thread per Xenon host.
 */
@Test(dataProvider = "MultiHostHostCounts")
public void testSchedulingConstantVariationMultiHost(int xenonHostCount, int hostCount) throws Throwable {
    List<Long> schedulingConstants = Collections.synchronizedList(new ArrayList<>());
    TestEnvironment env = TestEnvironment.create(xenonHostCount);
    List<Thread> threads = new ArrayList<>();
    ServiceHost[] xenonHosts = env.getHosts();

    IntStream.range(0, xenonHostCount).forEach((xenonHostId) -> {
        Thread t = new Thread(() -> {
            List<Long> thisThreadSchedulingConstants = createHosts(xenonHosts[xenonHostId], hostCount);
            schedulingConstants.addAll(thisThreadSchedulingConstants);
        });
        t.start();
        threads.add(t);
    });

    for (Thread t : threads) {
        t.join();
    }
    env.stop();

    assertThat(schedulingConstants.size(), equalTo(hostCount * xenonHostCount));
    Collections.sort(schedulingConstants);
    double cv = schedulingConstantGapCV(schedulingConstants);
    logger.info("Scheduling constant gap coefficient of variation: {}", cv);
    assertThat(cv, lessThan(MAX_VARIATION));
}
From source file:com.taobao.itest.listener.ITestDataSetListener.java
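This test listener accumulates dataset configurations in a synchronized LinkedList while resolving XLS and XML dataset locations, including sheets generated by tddl rules.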
/**
 * Supports two ways of configuring multiple data sources; only one can be
 * used at a time.
 *
 * @param testContext
 * @return
 * @throws Exception
 */
List<DatasetConfig> getDatasetConfigs(TestContext testContext) throws Exception {
    ITestDataSet annotation = findAnnotation(testContext.getTestInstance().getClass(),
            testContext.getTestMethod());
    if (annotation == null)
        return null;
    String[] locations = determineLocations(testContext, annotation);
    String[] dsNames = determineDsNames(testContext, annotation);
    if (dsNames.length > 1 && locations.length != dsNames.length) {
        String errorMsg = format("dsNames number '%s' doesn't match the locations number '%s'.",
                dsNames.length, locations.length);
        log.error(errorMsg);
        throw new RuntimeException(errorMsg);
    }
    List<DatasetConfig> datasetConfigs = Collections.synchronizedList(new LinkedList<DatasetConfig>());

    // ======== added by junliang, tddl handling begin ========
    int ruleIndex = indexOfAppRule(dsNames);
    if (ruleIndex != -1) {
        AppRule rootRule = (AppRule) SpringContextManager.getApplicationContext().getBean(dsNames[ruleIndex]);
        String location = locations[ruleIndex];
        String fileType = location.substring(location.lastIndexOf(".") + 1);
        if (!XLS.equalsIgnoreCase(fileType)) {
            String errMsg = "Invalid file type [" + fileType
                    + "], only XLS file supported if you want to use tddl features";
            throw new RuntimeException(errMsg);
        }
        locations = removeStringFromArray(locations, ruleIndex);
        dsNames = removeStringFromArray(dsNames, ruleIndex);
        HSSFWorkbook workbook = new HSSFWorkbook(
                new DefaultResourceLoader().getResource(location).getInputStream());
        // null means use the local machine's current time
        Map<String, HSSFWorkbook> tddlMap = RuleCalUtil.calDataSet(rootRule, workbook, null);
        String autoGenFilePath = location.substring(0, location.lastIndexOf("/") + 1) + "autoGen/";
        Set<Entry<String, HSSFWorkbook>> entries = tddlMap.entrySet();
        String writeFilePath = null;
        for (Entry<String, HSSFWorkbook> entry : entries) {
            String autoGenLocation = autoGenFilePath + entry.getKey() + ".xls";
            writeFilePath = autoGenLocation.replaceAll("classpath:", "").replaceAll("[\\\\]", "/");
            writeFilePath = "target/test-classes" + writeFilePath;
            String directory = writeFilePath.substring(0, writeFilePath.lastIndexOf("/"));
            File dir = new File(directory);
            if (!dir.isDirectory()) {
                dir.mkdir();
            }
            File destFile = new File(writeFilePath);
            XlsDataSet.write(new XlsDataSet(entry.getValue()), new FileOutputStream(destFile));
            locations = addString2Array(locations, autoGenLocation);
            dsNames = addString2Array(dsNames, entry.getKey());
        }
    }
    // ======== tddl handling end ========

    for (int i = 0; i < locations.length; i++) {
        String location = locations[i];
        String fileType = location.substring(location.lastIndexOf(".") + 1);
        String dsName = dsNames.length == 1 ? dsNames[0] : dsNames[i];
        // build dataSet begin
        ReplacementDataSet dataSet;
        if (XLS.equalsIgnoreCase(fileType)) {
            XlsDataSet xlsDataSet = new XlsDataSet(
                    new DefaultResourceLoader().getResource(location).getInputStream());
            // The DataSource name may be defined in the xls sheet name.
            String[] sheetNames = xlsDataSet.getTableNames();
            for (String sheetName : sheetNames) {
                String[] temp = pattern.split(sheetName);
                String tableName = sheetName;
                if (temp.length == 2) {
                    // added by qixiu: remove sheets that define their own
                    // dsNames; they use separate data sets
                    sheetNames = (String[]) ArrayUtils.removeElement(sheetNames, sheetName);
                    String dsNameTmp = temp[0];
                    tableName = temp[1];
                    dataSet = new ReplacementDataSet(new DefaultDataSet(
                            new XlsTableWrapper(tableName, xlsDataSet.getTable(sheetName))));
                    buildDataBaseConfig(testContext, annotation, datasetConfigs, location, dsNameTmp, dataSet);
                }
            }
            // added by qixiu: normal sheets share one data set
            int sheetCounts = sheetNames.length;
            ITable[] tables = new ITable[sheetCounts];
            for (int j = 0; j <= sheetCounts - 1; j++) {
                tables[j] = new XlsTableWrapper(sheetNames[j], xlsDataSet.getTable(sheetNames[j]));
            }
            dataSet = new ReplacementDataSet(new DefaultDataSet(tables));
            buildDataBaseConfig(testContext, annotation, datasetConfigs, location, dsName, dataSet);
        } else if (XML.equalsIgnoreCase(fileType)) {
            dataSet = new ReplacementDataSet(
                    new FlatXmlDataSet(new DefaultResourceLoader().getResource(location).getInputStream()));
            dataSet.addReplacementObject("[NULL]", null);
            buildDataBaseConfig(testContext, annotation, datasetConfigs, location, dsName, dataSet);
        } else {
            String errorMsg = format("Unsupported file type, file '%s' must be xls or xml.", location);
            log.error(errorMsg);
            throw new RuntimeException(errorMsg);
        }
        // build dataSet end
    }
    return datasetConfigs;
}
From source file:org.cloudata.core.tabletserver.DiskSSTable.java
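Here each column's map files live in a synchronized list that is created lazily under a write lock the first time the column is seen.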
public void addTabletMapFile(String columnName, TabletMapFile mapFile) throws IOException {
    lock.obtainWriteLock();
    try {
        List<TabletMapFile> mapFileList = mapFiles.get(columnName);
        if (mapFileList == null) {
            mapFileList = Collections.synchronizedList(new ArrayList<TabletMapFile>());
            mapFiles.put(columnName, mapFileList);
        }
        mapFileList.add(mapFile);
        if (mapFileList.size() > this.maxMapFileCount) {
            this.maxMapFileCount = mapFileList.size();
        }
        calculateTotalMapFileSize();
    } finally {
        lock.releaseWriteLock();
    }
}
From source file:com.gemini.provision.security.openstack.SecurityProviderOpenStackImpl.java
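This provisioning method maps OpenStack security groups and rules onto Gemini objects, accumulating the results in a synchronized list that it returns to the caller.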
@Override
public List<GeminiSecurityGroup> listServerSecurityGroups(GeminiTenant tenant, GeminiEnvironment env,
        GeminiServer server) {
    List<GeminiSecurityGroup> listSecGrps = Collections.synchronizedList(new ArrayList<>());
    // authenticate the session with the OpenStack installation
    OSClient os = OSFactory.builder().endpoint(env.getEndPoint())
            .credentials(env.getAdminUserName(), env.getAdminPassword()).tenantName(tenant.getName())
            .authenticate();
    if (os == null) {
        Logger.error("Failed to authenticate Tenant: {}",
                ToStringBuilder.reflectionToString(tenant, ToStringStyle.MULTI_LINE_STYLE));
        return null;
    }
    // get the list from OpenStack
    List<? extends SecGroupExtension> osSecGrps = os.compute().securityGroups()
            .listServerGroups(server.getCloudID());
    osSecGrps.stream().forEach(osSecGrp -> {
        // see if this security group already exists in the environment
        GeminiSecurityGroup tmpSecGrp = env.getSecurityGroups().stream()
                .filter(s -> s.getName().equals(osSecGrp.getName())).findFirst().get();
        GeminiSecurityGroup newGemSecGrp = null;
        if (tmpSecGrp == null) {
            // The OpenStack security group hasn't been mapped to an object on the
            // Gemini side, so create it and add it to the environment.
            newGemSecGrp = new GeminiSecurityGroup();
            newGemSecGrp.setCloudID(osSecGrp.getId());
            newGemSecGrp.setProvisioned(true);
            newGemSecGrp.setName(osSecGrp.getName());
            newGemSecGrp.setDescription(osSecGrp.getDescription());
            env.addSecurityGroup(newGemSecGrp);
        }
        // check to see if this group's rules are mapped on the Gemini side
        List<? extends Rule> osSecGrpRules = osSecGrp.getRules();
        final GeminiSecurityGroup gemSecGrp = tmpSecGrp == null ? newGemSecGrp : tmpSecGrp;
        osSecGrpRules.stream().filter(osSecGrpRule -> osSecGrpRule != null).forEach(osSecGrpRule -> {
            GeminiSecurityGroupRule gemSecGrpRule = gemSecGrp.getSecurityRules().stream()
                    .filter(sr -> sr.getName().equals(osSecGrpRule.getName())).findFirst().get();
            if (gemSecGrpRule == null) {
                // the rule has not been mapped on the Gemini side, so create it
                gemSecGrpRule = new GeminiSecurityGroupRule();
            }
            gemSecGrpRule.setCloudID(osSecGrpRule.getId());
            gemSecGrpRule.setProvisioned(true);
            gemSecGrpRule.setPortRangeMin(osSecGrpRule.getFromPort());
            gemSecGrpRule.setPortRangeMax(osSecGrpRule.getToPort());
            gemSecGrpRule.setProtocol(Protocol.fromString(osSecGrpRule.getIPProtocol().toString()));
            gemSecGrpRule.setCidr(osSecGrpRule.getRange().getCidr());
            gemSecGrp.addSecurityRule(gemSecGrpRule);
        });
        // check if this security group is attached to the server on the Gemini side
        if (server.getSecGroupNames().stream().noneMatch(s -> s.equals(osSecGrp.getName()))) {
            // it isn't, so add it to the server
            server.addSecGroupName(osSecGrp.getName());
        }
        listSecGrps.add(gemSecGrp);
    });
    Logger.debug("Successfully retrieved security groups Tenant: {} Env: {} server {}",
            tenant.getName(), env.getName(), server.getName());
    return listSecGrps;
}