Example usage for java.util.stream IntStream rangeClosed

List of usage examples for java.util.stream IntStream rangeClosed

Introduction

In this page you can find the example usage for java.util.stream IntStream rangeClosed.

Prototype

public static IntStream rangeClosed(int startInclusive, int endInclusive) 

Source Link

Document

Returns a sequential ordered IntStream from startInclusive (inclusive) to endInclusive (inclusive) by an incremental step of 1.

Usage

From source file:org.ow2.proactive.connector.iaas.cloud.provider.jclouds.openstack.OpenstackJCloudsProvider.java

@Override
public Set<Instance> createInstance(Infrastructure infrastructure, Instance instance) {

    // Resolve the jclouds compute context for this infrastructure and unwrap
    // the OpenStack Nova API so we can talk to the region's server endpoint.
    ComputeService computeService = getComputeServiceFromInfastructure(infrastructure);
    NovaApi novaApi = computeService.getContext().unwrapApi((NovaApi.class));
    ServerApi serverApi = novaApi.getServerApi(region);

    // Boot options are derived once from the request and shared by all servers.
    CreateServerOptions serverOptions = createOptions(instance);

    int requestedCount = Integer.valueOf(instance.getNumber());

    // Create the requested number of servers (indices 1..N) and convert each
    // created server into the provider-agnostic Instance representation.
    return IntStream.rangeClosed(1, requestedCount)
            .mapToObj(index -> createOpenstackInstance(instance, serverApi, serverOptions))
            .map(server -> instanceCreatorFromNodeMetadata.apply(server, infrastructure.getId()))
            .collect(Collectors.toSet());
}

From source file:io.pivotal.demo.smartgrid.frontend.timeseries.AggregateCounterTimeSeriesRepository.java

@Override
public Map<String, TimeSeriesCollection> getTimeSeriesData(TimeSeriesDataRequest dataRequest) {

    int houseId = dataRequest.getHouseId();

    // GRID_HOUSE_ID is the sentinel for "all houses": fan out over the whole
    // id range; otherwise query just the single requested house.
    IntStream houseNumStream;
    if (houseId == GRID_HOUSE_ID) {
        houseNumStream = IntStream.rangeClosed(HOUSE_ID_MIN, HOUSE_ID_MAX);
    } else {
        houseNumStream = IntStream.of(houseId);
    }

    // Fetch aggregate counters per house in parallel, dropping houses that
    // returned no data.
    List<AggregateCounterCollection> aggregateCounterCollections = houseNumStream.parallel()
            .mapToObj(id -> new TimeSeriesDataRequest(dataRequest, id))
            .map(this::fetchAggregateCounterData)
            .filter(acc -> acc != null && !acc.getAggregateCounters().isEmpty())
            .collect(Collectors.toList());

    // Index each converted time-series collection by its name.
    Map<String, TimeSeriesCollection> result = new HashMap<>();
    for (AggregateCounterCollection acc : aggregateCounterCollections) {
        TimeSeriesCollection tsc = convertToTimeSeriesCollection(acc);
        result.put(tsc.getName(), tsc);
    }

    // Add the grid-wide total series under the synthetic key "h_-1".
    TimeSeriesCollection totalGridTimeSeriesCollection = aggreagteGridTotalTimeSeries(result);
    result.put("h_-1", totalGridTimeSeriesCollection);

    return result;
}

From source file:com.github.jackygurui.vertxredissonrepository.repository.CounterConcurrentWaterfallTest.java

@Test
public void test5CounterConcurrentWaterfall(TestContext context) throws Exception {
    Async async = context.async();
    HanyuPinyin.convert(""); // warm up the converter so timing excludes its init cost
    StopWatch sw = new StopWatch();
    sw.start();
    int records = 100;
    // Series: count -> concurrently create+read `records` customers -> count again.
    org.simondean.vertx.async.Async.<Long>series().task(customerRepository::totalCount).task(t -> {
        try {
            JsonNode source = JsonLoader.fromResource("/Customer.json");
            AtomicLong counter = new AtomicLong(0);
            IntStream.rangeClosed(1, records).parallel().forEach(e -> {
                // Give each record a unique phone number derived from the template.
                JsonObject clone = new JsonObject(Json.encode(source));
                clone.getJsonObject("personalDetails").put("phoneNumber",
                        ((Long.parseLong(clone.getJsonObject("personalDetails").getString("phoneNumber"))
                                + 10000 + e) + ""));
                // Waterfall: create the customer, then read it back by id.
                org.simondean.vertx.async.Async.waterfall().<String>task(tt -> {
                    customerRepository.create(Json.encode(clone), tt);
                }).<Customer>task((id, tt) -> {
                    customerRepository.get(id, tt);
                }).run((AsyncResult<Customer> r) -> {
                    long ct = counter.incrementAndGet();
                    if (r.succeeded()) {
                        try {
                            Customer loaded = r.result();
                            Customer c = Json.decodeValue(clone.encode(), Customer.class);
                            c.setId(loaded.getId());
                            c.getAddressDetails().setId(loaded.getId());
                            c.getPersonalDetails().setId(loaded.getId());
                            String encoded = Json.encode(c);
                            // FIX: the original compared a Customer object to a String
                            // with equals(), which is always false, so mismatches were
                            // never logged. Compare the JSON encodings instead.
                            if (!Json.encode(r.result()).equals(encoded)) {
                                logger.info(loaded.getId() + " - SOURCE : " + encoded);
                                logger.info(loaded.getId() + " - RESULT : " + r.result());
                            }
                            context.assertEquals(Json.encode(r.result()), encoded);
                        } catch (Exception ex) {
                            t.handle(Future.failedFuture(ex));
                        }
                    } else {
                        t.handle(Future.failedFuture(r.cause()));
                    }
                    // Complete this series task once every record has been processed.
                    if (ct == records) {
                        t.handle(Future.succeededFuture(ct));
                    }
                });
            });
        } catch (IOException e) {
            t.handle(Future.failedFuture(e));
        }
    }).task(customerRepository::totalCount).run(r -> {
        if (r.succeeded()) {
            // before-count + number-created must equal after-count.
            context.assertEquals(r.result().get(0) + r.result().get(1), r.result().get(2));
            sw.stop();
            logger.info("test count: time to count then concurrently save and get " + records
                    + " customer records and count again: " + sw.getTime());
            async.complete();
        } else {
            context.fail(r.cause());
            async.complete();
        }
    });
}

From source file:com.github.jackygurui.vertxredissonrepository.repository.SaveAndSearchAndGetCallInConcurrentTest.java

@Test
public void test2SaveAndSearchAndGetCallIn(TestContext context) throws Exception {
    Async async = context.async();
    JsonNode source = JsonLoader.fromResource("/CallIn.json");
    int records = 1000;
    AtomicLong total = new AtomicLong(0);
    ConcurrentHashMap<JsonObject, String> m = new ConcurrentHashMap<>();
    // FIX: use a half-open range so exactly `records` items are generated.
    // The original rangeClosed(0, records) produced records + 1 items while
    // the completion check below fires at `records`, so the test could
    // complete with one request still in flight.
    Stream<JsonObject> stream = IntStream.range(0, records).mapToObj(e -> {
        // Derive a unique phone number and call time per record.
        JsonObject clone = new JsonObject(Json.encode(source));
        Long number = Long.parseLong(clone.getString("phoneNumber")) + e;
        clone.put("phoneNumber", number + "");
        Long callTime = clone.getLong("callTime") + e;
        clone.put("callTime", callTime);
        return clone;
    });
    StopWatch sw = new StopWatch();
    sw.start();
    stream.parallel().forEach(e -> {
        // Waterfall: create the record, then poll the search index until it
        // becomes visible (indexing is asynchronous), retrying up to 100000x.
        org.simondean.vertx.async.Async.waterfall().<String>task(t -> {
            callInRepository.create(Json.encode(e), t);
        }).<List<CallIn>>task((id, t) -> {
            m.put(e, id);
            AtomicLong idc = new AtomicLong(0);
            org.simondean.vertx.async.Async.retry().<List<CallIn>>task(tt -> {
                callInRepository.searchIndexByScoreAndGet("callTime", e.getDouble("callTime"),
                        e.getDouble("callTime"), 0, 1, ttt -> {
                            logger.info("id = " + id + " | retry count: " + idc.incrementAndGet());
                            tt.handle(ttt.succeeded() && ttt.result() != null && !ttt.result().isEmpty()
                                    ? Future.succeededFuture(ttt.result())
                                    : Future.failedFuture(ttt.cause()));
                        });
            }).times(100000).run(t);
        }).run(r -> {
            context.assertTrue(r.succeeded());
            if (r.succeeded()) {
                // Exactly one hit is expected, and it must round-trip to the
                // JSON we stored (plus the generated id).
                context.assertFalse(r.result().isEmpty());
                context.assertEquals(1, r.result().size());
                CallIn ci = r.result().iterator().next();
                context.assertNotNull(ci);
                logger.info(Json.encode(ci));
                CallIn cii = Json.decodeValue(e.put("id", m.get(e)).encode(), CallIn.class);
                context.assertEquals(Json.encode(cii), Json.encode(ci));
            }
            long t;
            if ((t = total.incrementAndGet()) == records) {
                sw.stop();
                logger.info("time to concurrently save and search and get " + records + " call in records: "
                        + sw.getTime());
                async.complete();
            } else {
                logger.info("t = " + t);
            }
        });
    });

}

From source file:io.github.azige.bbs.web.controller.TopicController.java

@RequestMapping(value = "/topic", method = RequestMethod.GET)
public String topicListView(@PageableDefault(sort = "id", direction = Direction.DESC) Pageable pagable,
        Model model) {
    // Load the requested page of topics and expose its content to the view.
    Page<Topic> page = topicRepository.findAll(pagable);
    model.addAttribute("topics", page.getContent());

    // Build a pager window around the current page, clamped to the valid
    // range [0, totalPages - 1].
    int windowStart = page.getNumber() - 3;
    int windowEnd = page.getNumber() + 3;
    if (windowStart < 0) {
        windowStart = 0;
        windowEnd = Math.min(windowStart + 7, page.getTotalPages() - 1);
    } else if (windowEnd >= page.getTotalPages()) {
        windowEnd = page.getTotalPages() - 1;
        windowStart = Math.max(windowEnd - 7, 0);
    }
    model.addAttribute("pageNumbers", IntStream.rangeClosed(windowStart, windowEnd).toArray());
    model.addAttribute("page", page);
    return "topic-list";
}

From source file:org.matsim.contrib.drt.util.stats.DrtVehicleOccupancyProfileWriter.java

@Override
public void notifyMobsimBeforeCleanup(@SuppressWarnings("rawtypes") MobsimBeforeCleanupEvent e) {
    // Aggregate fleet occupancy into 300-second time bins for this iteration.
    DrtVehicleOccupancyProfileCalculator calculator = new DrtVehicleOccupancyProfileCalculator(fleet, 300);

    TimeDiscretizer timeDiscretizer = calculator.getTimeDiscretizer();
    calculator.calculate();

    // Per-iteration output path; the ".txt" extension is appended below.
    String file = matsimServices.getControlerIO().getIterationFilename(matsimServices.getIterationNumber(),
            OUTPUT_FILE);
    // Use HH:MM when bins align to whole minutes, HH:MM:SS otherwise.
    String timeFormat = timeDiscretizer.getTimeInterval() % 60 == 0 ? Time.TIMEFORMAT_HHMM
            : Time.TIMEFORMAT_HHMMSS;

    try (CompactCSVWriter writer = new CompactCSVWriter(IOUtils.getBufferedWriter(file + ".txt"))) {
        // One column per possible occupancy level: "0 pax" .. "<maxCapacity> pax".
        String[] paxHeader = IntStream.rangeClosed(0, calculator.getMaxCapacity()).mapToObj(i -> i + " pax")
                .toArray(String[]::new);
        writer.writeNext("time", "stay", paxHeader);

        // One row per time bin: bin start time, idle-vehicle count, then the
        // per-occupancy vehicle counts for that bin.
        for (int i = 0; i < timeDiscretizer.getIntervalCount(); i++) {
            int time = i * timeDiscretizer.getTimeInterval();
            String idleVehicles = calculator.getIdleVehicleProfile()[i] + "";
            writer.writeNext(Time.writeTime(time, timeFormat), idleVehicles,
                    getOccupancyValues(calculator.getVehicleOccupancyProfiles(), i));
        }
    }

    // Render the same data as both line and stacked-area charts.
    DefaultTableXYDataset createXYDataset = createXYDataset(calculator);
    generateImage(createXYDataset, TimeProfileCharts.ChartType.Line);
    generateImage(createXYDataset, TimeProfileCharts.ChartType.StackedArea);
}

From source file:io.yields.math.concepts.operator.Smoothness.java

private RealMatrix computeDistance(List<Tuple> normalizedData) {
    // Design matrix for a degree-`order` polynomial fit:
    // designMatrix[row][power] = x_row ^ power for power = 0..order,
    // with the matching y values collected in the right-hand side vector.
    RealMatrix designMatrix = new Array2DRowRealMatrix(normalizedData.size(), order + 1);
    RealMatrix rhs = new Array2DRowRealMatrix(normalizedData.size(), 1);
    int row = 0;
    for (Tuple tuple : normalizedData) {
        for (int power = 0; power <= order; power++) {
            designMatrix.setEntry(row, power, Math.pow(tuple.getX(), power));
        }
        rhs.setEntry(row, 0, tuple.getY());
        row++;
    }
    // Least-squares solve via QR; the residual (A*solution - b) is the distance.
    DecompositionSolver solver = new QRDecomposition(designMatrix, EPS).getSolver();
    if (solver.isNonSingular() && !isConstant(rhs)) {
        RealMatrix solution = solver.solve(rhs);
        return designMatrix.multiply(solution).subtract(rhs);
    }
    // Singular system or constant data: report a zero residual vector.
    return new Array2DRowRealMatrix(normalizedData.size(), 1);
}

From source file:io.github.pellse.decorator.util.reflection.ReflectionUtils.java

public static <T, U> T newInstance(Class<T> clazz, U argToInsert, Object[] args, Class<?>[] argTypes) {
    // Try inserting argToInsert at every possible position 0..args.length
    // (inclusive: an array of length N has N + 1 insertion points) and return
    // the first construction that succeeds. If none does, fall back to the
    // plain constructor without the extra argument.
    return IntStream.rangeClosed(0, args.length)
            .mapToObj(position -> CheckedSupplier
                    .of(() -> newInstance(clazz, argToInsert, position, args, argTypes))
                    .toOptional())
            .flatMap(candidate -> candidate.map(Stream::of).orElseGet(Stream::empty))
            .findFirst()
            .orElseGet(CheckedSupplier.of(() -> invokeConstructor(clazz, args, argTypes)));
}

From source file:ru.jts_dev.common.id.impl.bitset.BitSetIdPoolTest.java

@DirtiesContext
@Test
public void throwsExceptionIfNoFreeIndexes() {
    // Borrow far more ids than the pool can serve; once exhausted, the pool
    // must fail with an AllocationException carrying the expected message.
    Throwable exception = expectThrows(AllocationException.class, () -> {
        IntStream.rangeClosed(0, 20000).parallel().forEach(ignored -> idPool.borrow());
    });
    assertThat(exception.getMessage()).isEqualTo("No available indexes in pool");
}

From source file:org.ow2.proactive.connector.iaas.cloud.provider.vmware.VMWareProvider.java

@Override
public Set<Instance> createInstance(Infrastructure infrastructure, Instance instance) {

    String image = instance.getImage();
    Folder rootFolder = vmWareServiceInstanceCache.getServiceInstance(infrastructure).getRootFolder();
    String instanceImageId = getInstanceIdFromImage(image);

    // Resolve the template VM plus the relocation spec and destination folder
    // that the image argument encodes.
    VirtualMachineRelocateSpec relocateSpecs = inferRelocateSpecsFromImageArgument(image, rootFolder);
    Folder destinationFolder = getDestinationFolderFromImage(image, rootFolder);
    VirtualMachine vmToClone = getVirtualMachineByNameOrUUID(instanceImageId, rootFolder);

    int requestedCount = Integer.valueOf(instance.getNumber());

    // Clone the template once per requested instance (indices start at 1 so
    // each clone gets a unique tag) and expose each clone's VMware UUID as
    // the instance id.
    return IntStream.rangeClosed(1, requestedCount)
            .mapToObj(index -> cloneVM(vmToClone,
                    createUniqInstanceTag(instance.getTag(), index), instance, rootFolder,
                    createVirtualMachineCloneSpec(index, vmToClone, relocateSpecs, instance),
                    destinationFolder))
            .map(clonedVm -> instance.withId(clonedVm.getConfig().getUuid()))
            .collect(Collectors.toSet());
}