List of usage examples for java.util.Collections.nCopies
public static <T> List<T> nCopies(int n, T o)
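The returned list is an immutable, space-efficient view: it stores the element reference once plus a size, and every index returns that same reference. A minimal sketch of the basic call, before the real-world usages below:

import java.util.Collections;
import java.util.List;

public class NCopiesBasics {
    public static void main(String[] args) {
        List<String> copies = Collections.nCopies(3, "ab");
        System.out.println(copies);                          // [ab, ab, ab]
        System.out.println(copies.get(0) == copies.get(2));  // true: one shared reference
        // copies.set(0, "x") would throw UnsupportedOperationException;
        // several examples below wrap the view in an ArrayList when mutation is needed.
    }
}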
From source file:org.apache.metron.parsers.integration.components.ParserTopologyComponent.java
@Override
public void start() throws UnableToStartException {
    try {
        final Map<String, Object> stormConf = new HashMap<>();
        stormConf.put(Config.TOPOLOGY_DEBUG, true);
        ParserTopologyBuilder.ParserTopology topologyBuilder = ParserTopologyBuilder.build(
                topologyProperties.getProperty(ZKServerComponent.ZOOKEEPER_PROPERTY),
                Optional.ofNullable(brokerUrl),
                sensorTypes,
                // One entry per sensor type (nCopies repeats the single value).
                (x, y) -> Collections.nCopies(sensorTypes.size(), 1),
                (x, y) -> Collections.nCopies(sensorTypes.size(), 1),
                (x, y) -> 1,
                (x, y) -> 1,
                (x, y) -> 1,
                (x, y) -> 1,
                // Note: all positions share the same HashMap instance.
                (x, y) -> Collections.nCopies(sensorTypes.size(), new HashMap<>()),
                (x, y) -> null,
                (x, y) -> outputTopic,
                (x, y) -> errorTopic,
                (x, y) -> {
                    Config c = new Config();
                    c.putAll(stormConf);
                    return c;
                });
        stormCluster = new LocalCluster();
        stormCluster.submitTopology(getTopologyName(), stormConf,
                topologyBuilder.getBuilder().createTopology());
    } catch (Exception e) {
        throw new UnableToStartException("Unable to start parser topology for sensorTypes: " + sensorTypes, e);
    }
}
From source file:org.apache.hadoop.hive.serde2.teradata.TeradataBinaryDataOutputStream.java
/**
 * Write TIMESTAMP(N).
 * The representation of a timestamp in Teradata binary format is:
 * the number of bytes to read is based on the precision of the timestamp;
 * each byte represents one char, and the timestamp uses a string representation.
 * E.g. for 1911-11-11 19:20:21.433200 in TIMESTAMP(3), we cut it down to 1911-11-11 19:20:21.433 and write
 * 31 39 31 31 2d 31 31 2d 31 31 20 31 39 3a 32 30 3a 32 31 2e 34 33 33.
 * A null timestamp is padded with spaces.
 *
 * @param timestamp the timestamp
 * @param byteNum the number of bytes the timestamp will occupy
 * @throws IOException the io exception
 */
public void writeTimestamp(TimestampWritableV2 timestamp, int byteNum) throws IOException {
    if (timestamp == null) {
        // A null timestamp is written as byteNum spaces.
        String pad = join("", Collections.nCopies(byteNum, " "));
        write(pad.getBytes("UTF8"));
        return;
    }
    String sTimeStamp = timestamp.getTimestamp().toString();
    if (sTimeStamp.length() >= byteNum) {
        // Truncate to the target precision.
        write(sTimeStamp.substring(0, byteNum).getBytes("UTF8"));
        return;
    }
    write(sTimeStamp.getBytes("UTF8"));
    String pad;
    if (sTimeStamp.length() == TIMESTAMP_NO_NANOS_BYTE_NUM) {
        // No fractional part yet: add the decimal point, then zero-pad.
        pad = "." + join("", Collections.nCopies(byteNum - sTimeStamp.length() - 1, "0"));
    } else {
        pad = join("", Collections.nCopies(byteNum - sTimeStamp.length(), "0"));
    }
    write(pad.getBytes("UTF8"));
}
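The pad-building idiom above, joining n copies of a one-character string, was the usual way to repeat a string before Java 11 introduced String.repeat. A standalone sketch of the zero-padding step, using the JDK's String.join in place of the join helper from the class above; the sample width of 26 corresponds to TIMESTAMP(6):

import java.util.Collections;

public class TimestampPadSketch {
    public static void main(String[] args) {
        int byteNum = 26; // field width for TIMESTAMP(6): "yyyy-mm-dd hh:mm:ss.ffffff"
        String sTimeStamp = "1911-11-11 19:20:21.433";
        // Pad the fractional seconds with zeros out to the full field width.
        String pad = String.join("", Collections.nCopies(byteNum - sTimeStamp.length(), "0"));
        System.out.println(sTimeStamp + pad); // 1911-11-11 19:20:21.433000
    }
}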
From source file:com.qpark.eip.core.spring.lockedoperation.EipTest.java
/**
 * Test to run several synchronous {@link LockableOperation}s in parallel.
 */
@Test
public void testLockableOperationTestSyncCall() {
    this.logger.debug("+testLockableOperationTestSyncCall");
    int threadCount = 4;
    OperationEventEnumType start = OperationEventEnumType.START;
    LockableOperationContext context = new LockableOperationContext();
    // Written from several worker threads, so the list must be synchronized.
    List<OperationStateEnumType> status = Collections.synchronizedList(new ArrayList<>());
    Callable<Void> task = () -> {
        OperationStateEnumType value = EipTest.this.operationSync
                .runOperation(EipTest.this.operationSync.getUUID(), start, context);
        status.add(value);
        this.logger.debug("    testLockableOperationTestSyncCall returned {}", value);
        return null;
    };
    // nCopies submits the same Callable instance threadCount times.
    List<Callable<Void>> tasks = Collections.nCopies(threadCount, task);
    ExecutorService executorService = Executors.newFixedThreadPool(threadCount);
    List<Future<Void>> futures;
    try {
        futures = executorService.invokeAll(tasks);
        List<Void> resultList = new ArrayList<>(futures.size());
        // Check for exceptions
        for (Future<Void> future : futures) {
            // Throws an exception if an exception was thrown by the task.
            resultList.add(future.get());
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
    long runnings = status.stream().filter(s -> s.equals(OperationStateEnumType.RUNNING)).count();
    long idle = status.stream().filter(s -> s.equals(OperationStateEnumType.IDLE)).count();
    Assert.assertEquals("Not the right number of sync processes got the RUNNING return.",
            threadCount - 1, runnings);
    Assert.assertEquals("Too many IDLE sync processes", 1, idle);
    OperationStateEnumType idleResult = this.operationSync.runOperation(this.operationSync.getUUID(),
            OperationEventEnumType.CHECK_STATE, context);
    Assert.assertEquals("Cleanup missing at sync processes", OperationStateEnumType.IDLE, idleResult);
    this.logger.debug("-testLockableOperationTestSyncCall");
}
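Passing the same Callable to nCopies is a compact way to fan one task out across a pool for contention tests like this one. A self-contained sketch of the idiom; since LockableOperation is not part of the JDK, an AtomicInteger stands in for the lock here:

import java.util.Collections;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicInteger;

public class FanOutSketch {
    public static void main(String[] args) throws Exception {
        int threadCount = 4;
        AtomicInteger lock = new AtomicInteger();
        // Only one caller can move the "lock" from 0 to 1.
        Callable<Boolean> task = () -> lock.compareAndSet(0, 1);
        ExecutorService pool = Executors.newFixedThreadPool(threadCount);
        // nCopies stores one task reference; invokeAll still runs it threadCount times.
        List<Future<Boolean>> futures = pool.invokeAll(Collections.nCopies(threadCount, task));
        int acquired = 0;
        for (Future<Boolean> f : futures) {
            if (f.get()) {
                acquired++;
            }
        }
        pool.shutdown();
        System.out.println(acquired + " of " + threadCount + " acquired the lock"); // exactly 1
    }
}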
From source file:com.gargoylesoftware.htmlunit.javascript.background.JavaScriptJobManagerTest.java
/**
 * @throws Exception if the test fails
 */
@Test
public void setClearIntervalUsesManager() throws Exception {
    final String content = "<html>\n"
            + "<head>\n"
            + " <title>test</title>\n"
            + " <script>\n"
            + " var threadID;\n"
            + " function test() {\n"
            + " threadID = setInterval(doAlert, 100);\n"
            + " }\n"
            + " var iterationNumber=0;\n"
            + " function doAlert() {\n"
            + " alert('blah');\n"
            + " if (++iterationNumber >= 3) {\n"
            + " clearInterval(threadID);\n"
            + " }\n"
            + " }\n"
            + " </script>\n"
            + "</head>\n"
            + "<body onload='test()'>\n"
            + "</body>\n"
            + "</html>";

    final List<String> collectedAlerts = Collections.synchronizedList(new ArrayList<String>());
    startTimedTest();
    final HtmlPage page = loadPage(content, collectedAlerts);
    final JavaScriptJobManager jobManager = page.getEnclosingWindow().getJobManager();
    assertNotNull(jobManager);
    assertEquals(1, jobManager.getJobCount());
    jobManager.waitForJobs(1000);
    assertEquals(0, jobManager.getJobCount());
    // nCopies builds the expected list of three identical alerts.
    assertEquals(Collections.nCopies(3, "blah"), collectedAlerts);
    assertMaxTestRunTime(1000);
}
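Here nCopies builds the expected value for the assertion: List.equals compares sizes and element sequences regardless of implementation class, so the immutable nCopies view matches the synchronized ArrayList of collected alerts. A minimal sketch of that equality contract:

import java.util.Arrays;
import java.util.Collections;

public class ExpectedListSketch {
    public static void main(String[] args) {
        // Different List implementations, same contents: equals() returns true.
        System.out.println(Collections.nCopies(3, "blah")
                .equals(Arrays.asList("blah", "blah", "blah"))); // true
    }
}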
From source file:org.apache.gobblin.service.SimpleKafkaSpecConsumer.java
public SimpleKafkaSpecConsumer(Config config, Optional<Logger> log) {
    // Consumer
    String kafkaConsumerClientClass = ConfigUtils.getString(config, CONSUMER_CLIENT_FACTORY_CLASS_KEY,
            DEFAULT_CONSUMER_CLIENT_FACTORY_CLASS);
    try {
        Class<?> clientFactoryClass = (Class<?>) Class.forName(kafkaConsumerClientClass);
        final GobblinKafkaConsumerClient.GobblinKafkaConsumerClientFactory factory =
                (GobblinKafkaConsumerClient.GobblinKafkaConsumerClientFactory) ConstructorUtils
                        .invokeConstructor(clientFactoryClass);
        _kafkaConsumer = factory.create(config);
    } catch (ClassNotFoundException | NoSuchMethodException | IllegalAccessException | InstantiationException
            | InvocationTargetException e) {
        if (log.isPresent()) {
            log.get().error("Failed to instantiate Kafka consumer from class " + kafkaConsumerClientClass, e);
        }
        throw new RuntimeException("Failed to instantiate Kafka consumer", e);
    }

    List<KafkaTopic> kafkaTopics = _kafkaConsumer.getFilteredTopics(Collections.EMPTY_LIST,
            Lists.newArrayList(Pattern.compile(config.getString(SimpleKafkaSpecExecutor.SPEC_KAFKA_TOPICS_KEY))));
    _partitions = kafkaTopics.get(0).getPartitions();
    // Seed one zero watermark per partition; wrapping in a new ArrayList makes the lists mutable.
    _lowWatermark = Lists.newArrayList(Collections.nCopies(_partitions.size(), 0L));
    _nextWatermark = Lists.newArrayList(Collections.nCopies(_partitions.size(), 0L));
    _highWatermark = Lists.newArrayList(Collections.nCopies(_partitions.size(), 0L));

    InputStream dummyInputStream = new ByteArrayInputStream(new byte[0]);
    _decoder = DecoderFactory.get().binaryDecoder(dummyInputStream, null);
    _reader = new SpecificDatumReader<AvroJobSpec>(AvroJobSpec.SCHEMA$);
    _versionWriter = new FixedSchemaVersionWriter();
}
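Because the nCopies view is immutable, the constructor above wraps it in Guava's Lists.newArrayList so the watermarks can be advanced per partition later. A plain-JDK sketch of the same seeding idiom:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class WatermarkSeedSketch {
    public static void main(String[] args) {
        int partitionCount = 3;
        // new ArrayList<>(...) copies the view into a mutable list.
        List<Long> lowWatermark = new ArrayList<>(Collections.nCopies(partitionCount, 0L));
        lowWatermark.set(1, 42L); // fine on the copy; would throw on the raw nCopies view
        System.out.println(lowWatermark); // [0, 42, 0]
    }
}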
From source file:ei.ne.ke.cassandra.cql3.Cql3StatementGenerator.java
/**
 * Constructs a CQL3 INSERT prepared statement for one or more entities.
 *
 * @param numEntity the number of entities the statement will insert
 * @return a CQL3 statement
 */
public String buildSaveStatement(int numEntity) {
    List<String> mappedColumns = spec.getMappedColumns();
    Mutating statement = insert().into(spec.getTable()).columns(mappedColumns);
    if (numEntity == 1) {
        return statement.build();
    } else {
        // Repeat the same INSERT numEntity times inside one batch.
        return batch().statement(Collections.nCopies(numEntity, statement)).build();
    }
}
From source file:picard.analysis.TheoreticalSensitivity.java
public static List<ArrayList<Double>> proportionsAboveThresholds(final List<ArrayList<Integer>> lists,
        final List<Double> thresholds) {
    final ArrayList<ArrayList<Double>> result = new ArrayList<>();

    for (final ArrayList<Integer> list : lists) {
        // Seed the row with 0.0 for every threshold; the ArrayList copy is mutable.
        final ArrayList<Double> newRow = new ArrayList<>(Collections.nCopies(thresholds.size(), 0.0));
        Collections.sort(list);
        int n = 0;
        int j = 0; // index within the ordered sample
        while (n < thresholds.size() && j < list.size()) {
            if (thresholds.get(n) > list.get(j)) {
                j++;
            } else {
                newRow.set(n++, (double) (list.size() - j) / list.size());
            }
        }
        result.add(newRow);
    }
    return result;
}
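The same seeding idiom starts each output row at 0.0; the two-pointer scan then records, for each threshold, the fraction of the sorted sample at or above it. A small worked call with hypothetical inputs, inlining the same loop for a single list:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class ProportionsSketch {
    public static void main(String[] args) {
        ArrayList<Integer> depths = new ArrayList<>(Arrays.asList(3, 10, 10, 25));
        List<Double> thresholds = Arrays.asList(5.0, 10.0, 20.0);
        ArrayList<Double> row = new ArrayList<>(Collections.nCopies(thresholds.size(), 0.0));
        Collections.sort(depths);
        int n = 0, j = 0;
        while (n < thresholds.size() && j < depths.size()) {
            if (thresholds.get(n) > depths.get(j)) {
                j++; // skip sample values below the current threshold
            } else {
                row.set(n++, (double) (depths.size() - j) / depths.size());
            }
        }
        System.out.println(row); // [0.75, 0.75, 0.25]
    }
}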
From source file:edu.stanford.slac.archiverappliance.PB.data.BoundaryConditionsSimulationValueGenerator.java
/**
 * Get a value based on the DBR type.
 * We should check for boundary conditions here and make sure PB does not throw exceptions
 * when we come close to MIN_ and MAX_ values.
 * @param type
 * @param secondsIntoYear
 * @return
 */
public SampleValue getSampleValue(ArchDBRTypes type, int secondsIntoYear) {
    switch (type) {
    case DBR_SCALAR_STRING:
        return new ScalarStringSampleValue(Integer.toString(secondsIntoYear));
    case DBR_SCALAR_SHORT:
        if (0 <= secondsIntoYear && secondsIntoYear < 1000) {
            // Check for some numbers around the minimum value
            return new ScalarValue<Short>((short) (Short.MIN_VALUE + secondsIntoYear));
        } else if (1000 <= secondsIntoYear && secondsIntoYear < 2000) {
            // Check for some numbers around the maximum value
            return new ScalarValue<Short>((short) (Short.MAX_VALUE - (secondsIntoYear - 1000)));
        } else {
            // Check for some numbers around 0
            return new ScalarValue<Short>((short) (secondsIntoYear - 2000));
        }
    case DBR_SCALAR_FLOAT:
        if (0 <= secondsIntoYear && secondsIntoYear < 1000) {
            // Check for some numbers around the minimum value
            return new ScalarValue<Float>(Float.MIN_VALUE + secondsIntoYear);
        } else if (1000 <= secondsIntoYear && secondsIntoYear < 2000) {
            // Check for some numbers around the maximum value
            return new ScalarValue<Float>(Float.MAX_VALUE - (secondsIntoYear - 1000));
        } else {
            // Check for some numbers around 0. Divide by a large number to make sure
            // we cater to the number of precision digits.
            return new ScalarValue<Float>((secondsIntoYear - 2000.0f) / secondsIntoYear);
        }
    case DBR_SCALAR_ENUM:
        return new ScalarValue<Short>((short) secondsIntoYear);
    case DBR_SCALAR_BYTE:
        return new ScalarValue<Byte>((byte) (secondsIntoYear % 255));
    case DBR_SCALAR_INT:
        if (0 <= secondsIntoYear && secondsIntoYear < 1000) {
            // Check for some numbers around the minimum value
            return new ScalarValue<Integer>(Integer.MIN_VALUE + secondsIntoYear);
        } else if (1000 <= secondsIntoYear && secondsIntoYear < 2000) {
            // Check for some numbers around the maximum value
            return new ScalarValue<Integer>(Integer.MAX_VALUE - (secondsIntoYear - 1000));
        } else {
            // Check for some numbers around 0
            return new ScalarValue<Integer>(secondsIntoYear - 2000);
        }
    case DBR_SCALAR_DOUBLE:
        if (0 <= secondsIntoYear && secondsIntoYear < 1000) {
            // Check for some numbers around the minimum value
            return new ScalarValue<Double>(Double.MIN_VALUE + secondsIntoYear);
        } else if (1000 <= secondsIntoYear && secondsIntoYear < 2000) {
            // Check for some numbers around the maximum value
            return new ScalarValue<Double>(Double.MAX_VALUE - (secondsIntoYear - 1000));
        } else {
            // Check for some numbers around 0. Divide by a large number to make sure
            // we cater to the number of precision digits.
            return new ScalarValue<Double>((secondsIntoYear - 2000.0) / (secondsIntoYear * 1000000));
        }
    case DBR_WAVEFORM_STRING:
        // Varying number of copies of a typical value
        return new VectorStringSampleValue(
                Collections.nCopies(secondsIntoYear, Integer.toString(secondsIntoYear)));
    case DBR_WAVEFORM_SHORT:
        return new VectorValue<Short>(Collections.nCopies(1, (short) secondsIntoYear));
    case DBR_WAVEFORM_FLOAT:
        // Varying number of copies of a typical value
        return new VectorValue<Float>(
                Collections.nCopies(secondsIntoYear, (float) Math.cos(secondsIntoYear * Math.PI / 3600)));
    case DBR_WAVEFORM_ENUM:
        return new VectorValue<Short>(Collections.nCopies(1024, (short) secondsIntoYear));
    case DBR_WAVEFORM_BYTE:
        // Large number of elements in the array
        return new VectorValue<Byte>(
                Collections.nCopies(65536 * secondsIntoYear, (byte) (secondsIntoYear % 255)));
    case DBR_WAVEFORM_INT:
        // Varying number of copies of a typical value
        return new VectorValue<Integer>(
                Collections.nCopies(secondsIntoYear, secondsIntoYear * secondsIntoYear));
    case DBR_WAVEFORM_DOUBLE:
        // Varying number of copies of a typical value
        return new VectorValue<Double>(
                Collections.nCopies(secondsIntoYear, Math.sin(secondsIntoYear * Math.PI / 3600)));
    case DBR_V4_GENERIC_BYTES:
        // Varying number of copies of a typical value
        ByteBuffer buf = ByteBuffer.allocate(1024 * 10);
        buf.put(Integer.toString(secondsIntoYear).getBytes());
        buf.flip();
        return new ByteBufSampleValue(buf);
    default:
        throw new RuntimeException("We seemed to have missed a DBR type when generating sample data");
    }
}
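The DBR_WAVEFORM_BYTE branch above can request tens of millions of logical elements cheaply because nCopies never materializes them; the real cost only appears if a consumer copies the view. A minimal sketch of that behavior, assuming nothing beyond the JDK:

import java.util.Collections;
import java.util.List;

public class HugeCopiesSketch {
    public static void main(String[] args) {
        // 100 million logical elements, created in O(1) time and space.
        List<Byte> huge = Collections.nCopies(100_000_000, (byte) 7);
        System.out.println(huge.size());           // 100000000
        System.out.println(huge.get(99_999_999));  // 7
        // new ArrayList<>(huge) is what would actually allocate 100M slots.
    }
}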
From source file:org.broadinstitute.gatk.utils.variant.GATKVariantContextUtils.java
/**
 * Returns a homozygous call allele list given the only allele and the ploidy.
 *
 * @param allele the only allele in the allele list.
 * @param ploidy the ploidy of the resulting allele list.
 *
 * @throws IllegalArgumentException if {@code allele} is {@code null} or ploidy is negative.
 *
 * @return never {@code null}.
 */
public static List<Allele> homozygousAlleleList(final Allele allele, final int ploidy) {
    if (allele == null || ploidy < 0) {
        throw new IllegalArgumentException();
    }

    // Use a tailored inner class to implement the list:
    // nCopies stores the single allele reference once, whatever the ploidy.
    return Collections.nCopies(ploidy, allele);
}
From source file:com.syncnapsis.data.model.Pinboard.java
@Transient
@Override
public List<User> getOwners() {
    // A one-element immutable list; effectively Collections.singletonList(getCreator()).
    return Collections.nCopies(1, getCreator());
}