List of usage examples for java.util.stream IntStream range
public static IntStream range(int startInclusive, int endExclusive)
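Before the project examples below, here is a minimal, self-contained sketch (not taken from any of the listed projects; the class name IntStreamRangeDemo is made up for illustration). It shows the half-open semantics of range — the stream yields startInclusive up to, but excluding, endExclusive — along with the index-iteration and boxing idioms that recur throughout the examples.

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

public class IntStreamRangeDemo { // hypothetical demo class, for illustration only
    public static void main(String[] args) {
        // range(0, 5) yields 0, 1, 2, 3, 4 -- the end bound is exclusive
        IntStream.range(0, 5).forEach(System.out::println);

        // A recurring idiom in the examples below: iterate a collection by index
        List<String> items = List.of("a", "b", "c");
        IntStream.range(0, items.size())
                 .forEach(i -> System.out.println(i + " -> " + items.get(i)));

        // Box the primitives when an object stream or collection is needed
        List<Integer> boxed = IntStream.range(1, 10).boxed().collect(Collectors.toList());
        System.out.println(boxed); // prints [1, 2, 3, 4, 5, 6, 7, 8, 9]
    }
}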
From source file:io.soabase.halva.processor.caseclass.Templates.java
private void addClassTuplable(CaseClassSpec spec, TypeSpec.Builder builder, ClassName className, Settings settings) {
    CodeBlock anyVal = CodeBlock.of("$T.any()", Any.class);
    CodeBlock.Builder initialize = CodeBlock.builder().add("$L(", getClassTupleMethodName(className));
    IntStream.range(0, spec.getItems().size()).forEach(i -> {
        if (i > 0) {
            initialize.add(", ");
        }
        initialize.add(anyVal);
    });
    initialize.add(").getClass()");
    FieldSpec.Builder fieldSpecBuilder = FieldSpec
            .builder(Class.class, "classTuplableClass", Modifier.PRIVATE, Modifier.STATIC, Modifier.FINAL)
            .initializer(initialize.build());
    MethodSpec.Builder methodSpecBuilder = MethodSpec.methodBuilder("getClassTuplableClass")
            .addAnnotation(Override.class).addModifiers(Modifier.PUBLIC).returns(Class.class)
            .addCode(CodeBlock.builder().addStatement("return classTuplableClass").build());
    if (settings.json) {
        methodSpecBuilder.addAnnotation(ClassName.get("com.fasterxml.jackson.annotation", "JsonIgnore"));
    }
    builder.addMethod(methodSpecBuilder.build());
    builder.addField(fieldSpecBuilder.build());
}
From source file:edu.pitt.dbmi.ccd.anno.annotation.AnnotationController.java
private void newAnnotationDataSubData(Annotation annotation, AnnotationData parent, @Valid List<AnnotationDataForm> children) {
    IntStream.range(0, children.size()).forEach(i -> {
        final Long attributeId = children.get(i).getAttribute();
        final Attribute attribute = attributeService.findById(attributeId);
        if (attribute == null) {
            throw new AttributeNotFoundException(attributeId);
        }
        final String value = children.get(i).getValue();
        final List<AnnotationDataForm> subData = children.get(i).getChildren();
        AnnotationData annotationData = new AnnotationData(annotation, attribute, value);
        annotationData = annotationDataService.save(annotationData);
        if (subData.size() > 0) {
            newAnnotationDataSubData(annotation, annotationData, subData);
        }
    });
}
From source file:org.lightjason.agentspeak.action.builtin.TestCActionMathBlasMatrix.java
/**
 * test graph-laplacian
 */
@Test
public final void graphlaplacian() {
    final List<ITerm> l_return = new ArrayList<>();
    new CGraphLaplacian().execute(false, IContext.EMPTYPLAN,
            Stream.of(new SparseDoubleMatrix2D(new double[][] {
                    { 0, 1, 0, 0, 1, 0 },
                    { 1, 0, 1, 0, 1, 0 },
                    { 0, 1, 0, 1, 0, 0 },
                    { 0, 0, 1, 0, 1, 1 },
                    { 1, 1, 0, 1, 0, 0 },
                    { 0, 0, 0, 1, 0, 0 }
            })).map(CRawTerm::from).collect(Collectors.toList()), l_return);

    Assert.assertEquals(l_return.size(), 1);
    final DoubleMatrix2D l_result = l_return.get(0).raw();

    IntStream.range(0, l_result.rows()).boxed().map(l_result::viewRow).mapToDouble(DoubleMatrix1D::zSum)
             .forEach(i -> Assert.assertEquals(i, 0, 0));
    IntStream.range(0, l_result.columns()).boxed().map(l_result::viewColumn).mapToDouble(DoubleMatrix1D::zSum)
             .forEach(i -> Assert.assertEquals(i, 0, 0));
}
From source file:org.eclipse.hawkbit.mgmt.rest.resource.MgmtDistributionSetResourceTest.java
@Test @Description("Ensures that the 'max actions per target' quota is enforced if the distribution set assignment of a target is changed permanently") public void changeDistributionSetAssignmentForTargetUntilQuotaIsExceeded() throws Exception { // create one target final Target testTarget = testdataFactory.createTarget("trg1"); final int maxActions = quotaManagement.getMaxActionsPerTarget(); // create a set of distribution sets final DistributionSet ds1 = testdataFactory.createDistributionSet("ds1"); final DistributionSet ds2 = testdataFactory.createDistributionSet("ds2"); final DistributionSet ds3 = testdataFactory.createDistributionSet("ds3"); IntStream.range(0, maxActions).forEach(i -> { // toggle the distribution set assignDistributionSet(i % 2 == 0 ? ds1 : ds2, testTarget); });//from w ww . ja v a2s . com // assign our test target to another distribution set and verify that // the 'max actions per target' quota is exceeded final String json = new JSONArray().put(new JSONObject().put("id", testTarget.getControllerId())) .toString(); mvc.perform( post(MgmtRestConstants.DISTRIBUTIONSET_V1_REQUEST_MAPPING + "/" + ds3.getId() + "/assignedTargets") .contentType(MediaType.APPLICATION_JSON).content(json)) .andExpect(status().isForbidden()); }
From source file:org.ligoj.app.plugin.vm.aws.VmAwsSnapshotResource.java
/**
 * Convert an XML AMI node to a {@link Snapshot} instance.
 */
private Snapshot toAmi(final Element element) {
    final Snapshot snapshot = new Snapshot();
    snapshot.setId(xml.getTagText(element, "imageId"));
    snapshot.setName(xml.getTagText(element, "name"));
    snapshot.setDescription(StringUtils.trimToNull(xml.getTagText(element, "description")));
    snapshot.setStatusText(xml.getTagText(element, "imageState"));
    snapshot.setAvailable("available".equals(snapshot.getStatusText()));
    snapshot.setPending("pending".equals(snapshot.getStatusText()));
    final String date = xml.getTagText(element, "creationDate");
    final XPath xPath = xml.xpathFactory.newXPath();
    try {
        // Author
        final NodeList tags = (NodeList) xPath.compile("tagSet/item").evaluate(element, XPathConstants.NODESET);
        snapshot.setAuthor(IntStream.range(0, tags.getLength()).mapToObj(tags::item)
                .filter(t -> TAG_AUDIT.equals(xml.getTagText((Element) t, "key")))
                .map(t -> xml.getTagText((Element) t, "value")).map(this::getUser).findAny().orElse(null));

        // Volumes
        final NodeList volumes = (NodeList) xPath.compile("blockDeviceMapping/item").evaluate(element,
                XPathConstants.NODESET);
        snapshot.setVolumes(IntStream.range(0, volumes.getLength()).mapToObj(volumes::item)
                .map(v -> toVolume((Element) v)).filter(v -> v.getId() != null).collect(Collectors.toList()));

        // Creation date
        snapshot.setDate(new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSXXX").parse(date));
    } catch (final Exception pe) {
        // Invalid or not correctly managed XML content
        snapshot.setVolumes(ListUtils.emptyIfNull(snapshot.getVolumes()));
        snapshot.setDate(new Date(0));
        log.info("Details of AMI {} cannot be fully parsed", snapshot.getId(), pe);
    }
    return snapshot;
}
From source file:com.github.aptd.simulation.datamodel.CXMLReader.java
/**
 * create the train list
 *
 * @param p_network network component
 * @param p_agents map with agent asl scripts
 * @param p_factory factory
 * @param p_time simulation time object
 * @param p_minfreetimetoclose minimum free time before a door closes
 * @return unmodifiable map with trains
 */
private static Pair<Map<String, ITrain<?>>, Map<String, IDoor<?>>> train(final Network p_network,
        final Map<String, String> p_agents, final IFactory p_factory, final ITime p_time,
        final double p_minfreetimetoclose) {
    final String l_dooragent = IStatefulElement.getDefaultAsl("door");
    final Map<String, IElement.IGenerator<ITrain<?>>> l_generators = new ConcurrentHashMap<>();
    final Set<IAction> l_actions = CCommon.actionsFromPackage().collect(Collectors.toSet());
    final IElement.IGenerator<IDoor<?>> l_doorgenerator = doorgenerator(p_factory, l_dooragent, l_actions, p_time);
    final Map<String, AtomicLong> l_doorcount = Collections.synchronizedMap(new HashMap<>());
    final Map<String, IDoor<?>> l_doors = Collections.synchronizedMap(new HashMap<>());
    return new ImmutablePair<>(
        Collections.<String, ITrain<?>>unmodifiableMap(
            p_network.getTimetable().getTrains().getTrain().parallelStream()
                .filter(i -> hasagentname(i.getAny3()))
                .map(i -> agentname(i, i.getAny3()))
                .map(i -> l_generators
                    .computeIfAbsent(i.getRight(),
                        a -> traingenerator(p_factory, p_agents.get(i.getRight()), l_actions, p_time))
                    .generatesingle(
                        i.getLeft().getId(),
                        i.getLeft().getTrainPartSequence().stream().flatMap(ref -> {
                            // @todo support multiple train parts
                            final EOcpTT[] l_tts = ((ETrainPart) ref.getTrainPartRef().get(0).getRef())
                                .getOcpsTT().getOcpTT().toArray(new EOcpTT[0]);
                            final CTrain.CTimetableEntry[] l_entries = new CTrain.CTimetableEntry[l_tts.length];
                            for (int j = 0; j < l_tts.length; j++) {
                                final EArrivalDepartureTimes l_times = l_tts[j].getTimes().stream()
                                    .filter(t -> t.getScope().equalsIgnoreCase("published"))
                                    .findAny()
                                    .orElseThrow(() -> new CSemanticException("missing published times"));
                                l_entries[j] = new CTrain.CTimetableEntry(
                                    j < 1
                                        ? 0.0
                                        : ((ETrack) l_tts[j - 1].getSectionTT().getTrackRef().get(0).getRef())
                                            .getTrackTopology().getTrackEnd().getPos().doubleValue(),
                                    ((EOcp) l_tts[j].getOcpRef()).getId(),
                                    l_tts[j].getStopDescription().getOtherAttributes()
                                        .getOrDefault(PLATFORM_REF_ATTRIBUTE, null),
                                    l_times.getArrival() == null
                                        ? null
                                        : l_times.getArrival().toGregorianCalendar().toZonedDateTime()
                                            .with(LocalDate.from(p_time.current().atZone(ZoneId.systemDefault())))
                                            .toInstant(),
                                    l_times.getDeparture() == null
                                        ? null
                                        : l_times.getDeparture().toGregorianCalendar().toZonedDateTime()
                                            .with(LocalDate.from(p_time.current().atZone(ZoneId.systemDefault())))
                                            .toInstant()
                                );
                            }
                            return Arrays.stream(l_entries);
                        }),
                        i.getLeft().getTrainPartSequence().stream()
                            // @todo support multiple train parts
                            .map(s -> (ETrainPart) s.getTrainPartRef().get(0).getRef())
                            .map(p -> (EFormation) p.getFormationTT().getFormationRef())
                            .flatMap(f -> f.getTrainOrder().getVehicleRef().stream())
                            .map(r -> new ImmutablePair<BigInteger, TDoors>(r.getVehicleCount(),
                                ((EVehicle) r.getVehicleRef()).getWagon().getPassenger().getDoors()))
                            .flatMap(v -> IntStream
                                .range(0, v.getLeft().intValue() * v.getRight().getNumber().intValue())
                                .mapToObj(j -> l_doors.computeIfAbsent(
                                    "door-" + i.getLeft().getId() + "-"
                                        + l_doorcount.computeIfAbsent(i.getLeft().getId(), id -> new AtomicLong(1L))
                                                     .getAndIncrement(),
                                    id -> l_doorgenerator.generatesingle(id, i.getLeft().getId(),
                                        v.getRight().getEntranceWidth().doubleValue()
                                            / v.getRight().getNumber().longValue(),
                                        p_minfreetimetoclose))))
                            .collect(Collectors.toList())))
                .collect(Collectors.toMap(IElement::id, i -> i))),
        l_doors);
}
From source file:org.apache.tinkerpop.gremlin.server.GremlinDriverIntegrateTest.java
@Test
public void shouldAvoidDeadlockOnCallToResultSetDotAll() throws Exception {
    // This test arose from this issue: https://github.org/apache/tinkerpop/tinkerpop3/issues/515
    //
    // ResultSet.all returns a CompletableFuture that blocks on the worker pool until isExhausted returns false.
    // isExhausted in turn needs a thread on the worker pool to even return. So it's totally possible to consume all
    // threads on the worker pool waiting for .all to finish such that you can't even get one to wait for
    // isExhausted to run.
    //
    // Note that all() doesn't work as described above anymore. It waits for callback on readComplete rather
    // than blocking on isExhausted.
    final int workerPoolSizeForDriver = 2;

    // the number of requests is 4 times the size of the worker pool as this originally did produce the problem
    // described above in the javadoc of the test (though an equivalent number also produced it), but this has
    // been tested to much higher multiples and passes. note that the maxWaitForConnection setting is high so
    // that the client doesn't timeout waiting for an available connection. obviously this can also be fixed
    // by increasing the maxConnectionPoolSize.
    final int requests = workerPoolSizeForDriver * 4;
    final Cluster cluster = Cluster.build().workerPoolSize(workerPoolSizeForDriver).maxWaitForConnection(300000)
            .create();
    final Client client = cluster.connect();

    final CountDownLatch latch = new CountDownLatch(requests);
    final AtomicReference[] refs = new AtomicReference[requests];
    IntStream.range(0, requests).forEach(ix -> {
        refs[ix] = new AtomicReference();
        client.submitAsync("Thread.sleep(5000);[1,2,3,4,5,6,7,8,9]")
                .thenAccept(rs -> rs.all().thenAccept(refs[ix]::set).thenRun(latch::countDown));
    });

    // countdown should have reached zero as results should have eventually been all returned and processed
    assertTrue(latch.await(20, TimeUnit.SECONDS));

    final List<Integer> expected = IntStream.range(1, 10).boxed().collect(Collectors.toList());
    IntStream.range(0, requests).forEach(r -> assertTrue(expected.containsAll(((List<Result>) refs[r].get())
            .stream().map(resultItem -> new Integer(resultItem.getInt())).collect(Collectors.toList()))));
}
From source file:org.apache.bookkeeper.bookie.CreateNewLogTest.java
public void testConcurrentCreateNewLog(boolean entryLogFilePreAllocationEnabled) throws Exception {
    ServerConfiguration conf = TestBKConfiguration.newServerConfiguration();
    // Creating a new configuration with a number of ledger directories.
    conf.setLedgerDirNames(ledgerDirs);
    conf.setEntryLogFilePreAllocationEnabled(entryLogFilePreAllocationEnabled);
    LedgerDirsManager ledgerDirsManager = new LedgerDirsManager(conf, conf.getLedgerDirs(),
            new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold()));
    EntryLogger el = new EntryLogger(conf, ledgerDirsManager);
    EntryLogManagerBase entryLogManager = (EntryLogManagerBase) el.getEntryLogManager();
    // set same thread executor for entryLoggerAllocator's allocatorExecutor
    setSameThreadExecutorForEntryLoggerAllocator(el.getEntryLoggerAllocator());

    Assert.assertEquals("previousAllocatedEntryLogId after initialization", -1,
            el.getPreviousAllocatedEntryLogId());
    Assert.assertEquals("leastUnflushedLogId after initialization", 0, el.getLeastUnflushedLogId());

    int createNewLogNumOfTimes = 10;
    AtomicBoolean receivedException = new AtomicBoolean(false);
    IntStream.range(0, createNewLogNumOfTimes).parallel().forEach((i) -> {
        try {
            entryLogManager.createNewLog((long) i);
        } catch (IOException e) {
            LOG.error("Received exception while creating newLog", e);
            receivedException.set(true);
        }
    });

    Assert.assertFalse("There shouldn't be any exceptions while creating newlog", receivedException.get());
    int expectedPreviousAllocatedEntryLogId = createNewLogNumOfTimes - 1;
    if (entryLogFilePreAllocationEnabled) {
        expectedPreviousAllocatedEntryLogId = createNewLogNumOfTimes;
    }
    Assert.assertEquals(
            "previousAllocatedEntryLogId after " + createNewLogNumOfTimes + " number of times createNewLog is called",
            expectedPreviousAllocatedEntryLogId, el.getPreviousAllocatedEntryLogId());
    Assert.assertEquals("Number of RotatedLogChannels", createNewLogNumOfTimes - 1,
            entryLogManager.getRotatedLogChannels().size());
}
From source file:org.apache.nifi.processors.standard.AbstractQueryDatabaseTable.java
protected String getQuery(DatabaseAdapter dbAdapter, String tableName, String sqlQuery, String columnNames,
        List<String> maxValColumnNames, String customWhereClause, Map<String, String> stateMap) {
    if (StringUtils.isEmpty(tableName)) {
        throw new IllegalArgumentException("Table name must be specified");
    }
    final StringBuilder query;

    if (StringUtils.isEmpty(sqlQuery)) {
        query = new StringBuilder(dbAdapter.getSelectStatement(tableName, columnNames, null, null, null, null));
    } else {
        query = getWrappedQuery(dbAdapter, sqlQuery, tableName);
    }

    List<String> whereClauses = new ArrayList<>();
    // Check state map for last max values
    if (stateMap != null && !stateMap.isEmpty() && maxValColumnNames != null) {
        IntStream.range(0, maxValColumnNames.size()).forEach((index) -> {
            String colName = maxValColumnNames.get(index);
            String maxValueKey = getStateKey(tableName, colName, dbAdapter);
            String maxValue = stateMap.get(maxValueKey);
            if (StringUtils.isEmpty(maxValue)) {
                // If we can't find the value at the fully-qualified key name, it is possible (under a previous
                // scheme) the value has been stored under a key that is only the column name. Fall back to check
                // the column name; either way, when a new maximum value is observed, it will be stored under the
                // fully-qualified key from then on.
                maxValue = stateMap.get(colName.toLowerCase());
            }
            if (!StringUtils.isEmpty(maxValue)) {
                Integer type = columnTypeMap.get(maxValueKey);
                if (type == null) {
                    // This shouldn't happen as we are populating columnTypeMap when the processor is scheduled.
                    throw new IllegalArgumentException("No column type found for: " + colName);
                }
                // Add a condition for the WHERE clause
                whereClauses.add(colName + (index == 0 ? " > " : " >= ")
                        + getLiteralByType(type, maxValue, dbAdapter.getName()));
            }
        });
    }

    if (customWhereClause != null) {
        whereClauses.add("(" + customWhereClause + ")");
    }

    if (!whereClauses.isEmpty()) {
        query.append(" WHERE ");
        query.append(StringUtils.join(whereClauses, " AND "));
    }

    return query.toString();
}
From source file:com.yahoo.bullet.drpc.FilterBoltTest.java
@Test
public void testGroupAllCount() {
    bolt = ComponentUtils.prepare(new ExpiringFilterBolt(), collector);

    Tuple rule = makeIDTuple(TupleType.Type.RULE_TUPLE, 42L,
            makeGroupFilterRule("timestamp", Arrays.asList("1", "2"), EQUALS, AggregationType.GROUP, 1,
                    singletonList(new GroupOperation(COUNT, null, "cnt"))));
    bolt.execute(rule);

    BulletRecord record = RecordBox.get().add("timestamp", "1").getRecord();
    Tuple matching = makeTuple(TupleType.Type.RECORD_TUPLE, record);
    IntStream.range(0, 10).forEach(i -> bolt.execute(matching));

    BulletRecord another = RecordBox.get().getRecord();
    Tuple nonMatching = makeTuple(TupleType.Type.RECORD_TUPLE, another);
    IntStream.range(0, 5).forEach(i -> bolt.execute(nonMatching));
    bolt.execute(nonMatching);

    // Two to flush bolt
    Tuple tick = TupleUtils.makeTuple(TupleType.Type.TICK_TUPLE);
    bolt.execute(tick);
    bolt.execute(tick);

    Assert.assertEquals(collector.getEmittedCount(), 1);
    GroupData actual = GroupData.fromBytes(getRawPayloadOfNthTuple(1));
    BulletRecord expected = RecordBox.get().add("cnt", 10).getRecord();
    Assert.assertTrue(isEqual(actual, expected));
}
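A closing contrast, added for illustration and not taken from any of the projects above (the class name RangeVsRangeClosed is made up): range excludes its end bound while rangeClosed includes it, which is the usual source of off-by-one mistakes with these calls.

import java.util.stream.IntStream;

public class RangeVsRangeClosed { // hypothetical demo class
    public static void main(String[] args) {
        System.out.println(IntStream.range(1, 5).count());       // 4 -> yields 1, 2, 3, 4
        System.out.println(IntStream.rangeClosed(1, 5).count()); // 5 -> yields 1, 2, 3, 4, 5
    }
}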