Example usage for java.util.stream IntStream range

Introduction

This page collects usage examples for java.util.stream.IntStream.range.

Prototype

public static IntStream range(int startInclusive, int endExclusive) 

Document

Returns a sequential ordered IntStream from startInclusive (inclusive) to endExclusive (exclusive) by an incremental step of 1.
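
Before the real-world samples below, here is a minimal self-contained sketch of the method; the class name and values are illustrative only, not taken from any example on this page:

import java.util.stream.IntStream;

public class RangeDemo {
    public static void main(String[] args) {
        // Prints "0 1 2 3 4": startInclusive is included, endExclusive is not.
        IntStream.range(0, 5).forEach(i -> System.out.print(i + " "));

        // Typical use: drive an index-based loop as a stream.
        int[] squares = IntStream.range(0, 5).map(i -> i * i).toArray(); // {0, 1, 4, 9, 16}
    }
}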

Usage

From source file:com.mycompany.wolf.Room.java

private void notifyDayBreak() {
    hunterKillings.values().stream().map(hunterKilling -> hunterKilling.playerId).forEach(newlyDead::add);

    dead.addAll(newlyDead);

    firstTurn = IntStream.range(0, sessions.size())
            .filter(i -> Objects.equals(theVoted, getPlayerId(sessions.get(i)))).findAny().orElse(0);
    turnOffset = 0;

    Map<String, Object> notifyDead = ImmutableMap.of("code", "notifyDayBreak", "properties",
            ImmutableMap.of("dead", dead, "newlyDead", newlyDead));
    String jsonText = JsonUtils.toString(notifyDead);
    sessions.stream().forEach(s -> s.getAsyncRemote().sendText(jsonText));

    notifyNextTurn();
}
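
The range/filter/findAny chain above is a common idiom for locating the index of a matching element. Reduced to its core, with a hypothetical list and predicate, it looks like this:

import java.util.Arrays;
import java.util.List;
import java.util.stream.IntStream;

public class IndexOfDemo {
    public static void main(String[] args) {
        List<String> players = Arrays.asList("ann", "bob", "cid"); // illustrative data
        int index = IntStream.range(0, players.size())
                .filter(i -> "bob".equals(players.get(i)))
                .findAny()   // any matching index; Room.java falls back to 0
                .orElse(0);
        System.out.println(index); // 1
    }
}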

From source file:org.ligoj.app.plugin.vm.aws.VmAwsPluginResource.java

/**
 * Logs the instance state transition and indicates whether the transition was a success.
 *
 * @param response
 *            the EC2 response markup.
 * @return <code>true</code> when the transition succeeded.
 */
private boolean logTransitionState(final String response)
        throws XPathExpressionException, SAXException, IOException, ParserConfigurationException {
    final NodeList items = xml.getXpath(ObjectUtils.defaultIfNull(response, "<a></a>"),
            "/*[contains(local-name(),'InstancesResponse')]/instancesSet/item");
    return IntStream.range(0, items.getLength()).mapToObj(items::item).map(n -> (Element) n)
            .peek(e -> log.info("Instance {} goes from {} to {} state", xml.getTagText(e, "instanceId"),
                    getEc2State(e, "previousState"), getEc2State(e, "currentState")))
            .findFirst().isPresent();
}

From source file:com.uber.hoodie.common.util.TestCompactionUtils.java

/**
 * Validates that the generated compaction operation matches the input file slice and partition path.
 *
 * @param slice            File Slice
 * @param op               HoodieCompactionOperation
 * @param expPartitionPath Partition path
 */
private void testFileSliceCompactionOpEquality(FileSlice slice, HoodieCompactionOperation op,
        String expPartitionPath) {
    Assert.assertEquals("Partition path is correct", expPartitionPath, op.getPartitionPath());
    Assert.assertEquals("Same base-instant", slice.getBaseInstantTime(), op.getBaseInstantTime());
    Assert.assertEquals("Same file-id", slice.getFileId(), op.getFileId());
    if (slice.getDataFile().isPresent()) {
        Assert.assertEquals("Same data-file", slice.getDataFile().get().getPath(), op.getDataFilePath());
    }
    List<String> paths = slice.getLogFiles().map(l -> l.getPath().toString()).collect(Collectors.toList());
    IntStream.range(0, paths.size()).boxed().forEach(idx -> {
        Assert.assertEquals("Log File Index " + idx, paths.get(idx), op.getDeltaFilePaths().get(idx));
    });
    Assert.assertEquals("Metrics set", metrics, op.getMetrics());
}

From source file:org.lightjason.agentspeak.action.builtin.TestCActionMathStatistics.java

/**
 * test exponential selection with lazy parameter
 */
@Test
public final void exponentialselectionlazy() {
    final List<ITerm> l_return = Collections.synchronizedList(new ArrayList<>());

    IntStream.range(0, 6500).parallel()
            .forEach(i -> new CExponentialSelection().execute(false, IContext.EMPTYPLAN,
                    Stream.of(Stream.of("a", "b").collect(Collectors.toList()),
                            Stream.of(4.5, 3.5).collect(Collectors.toList()), 0.5).map(CRawTerm::from)
                            .collect(Collectors.toList()),
                    l_return));

    Assert.assertEquals(
            (double) Collections.frequency(l_return.stream().map(ITerm::raw).collect(Collectors.toList()), "a")
                    / l_return.size(),
            0.73, 0.2);

    Assert.assertEquals(
            (double) Collections.frequency(l_return.stream().map(ITerm::raw).collect(Collectors.toList()), "b")
                    / l_return.size(),
            0.27, 0.2);
}
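
Note the pairing above: a parallel range runs its body on the common ForkJoinPool, so any shared sink must be thread-safe, which is why l_return is a synchronized list. A minimal sketch of that constraint, with illustrative names and counts:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.stream.IntStream;

public class ParallelRangeDemo {
    public static void main(String[] args) {
        // A plain ArrayList here could be corrupted by concurrent adds.
        List<Integer> sink = Collections.synchronizedList(new ArrayList<>());
        IntStream.range(0, 1_000).parallel().forEach(sink::add);
        System.out.println(sink.size()); // 1000
    }
}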

From source file:com.streamsets.pipeline.stage.origin.jdbc.AbstractTableJdbcSource.java

@Override
public void produce(Map<String, String> lastOffsets, int maxBatchSize) throws StageException {
    int batchSize = Math.min(maxBatchSize, commonSourceConfigBean.maxBatchSize);
    handleLastOffset(new HashMap<>(lastOffsets));
    try {
        executorService = new SafeScheduledExecutorService(numberOfThreads,
                TableJdbcRunnable.TABLE_JDBC_THREAD_PREFIX);

        ExecutorCompletionService<Future> completionService = new ExecutorCompletionService<>(executorService);

        final RateLimiter queryRateLimiter = commonSourceConfigBean.creatQueryRateLimiter();

        List<Future> allFutures = new LinkedList<>();
        IntStream.range(0, numberOfThreads).forEach(threadNumber -> {
            JdbcBaseRunnable runnable = new JdbcRunnableBuilder().context(getContext())
                    .threadNumber(threadNumber).batchSize(batchSize).connectionManager(connectionManager)
                    .offsets(offsets).tableProvider(tableOrderProvider)
                    .tableReadContextCache(getTableReadContextCache(connectionManager, offsets))
                    .commonSourceConfigBean(commonSourceConfigBean).tableJdbcConfigBean(tableJdbcConfigBean)
                    .queryRateLimiter(commonSourceConfigBean.creatQueryRateLimiter()).isReconnect(isReconnect)
                    .build();

            toBeInvalidatedThreadCaches.add(runnable.getTableReadContextCache());
            allFutures.add(completionService.submit(runnable, null));
        });

        if (commonSourceConfigBean.allowLateTable) {
            TableSpooler tableSpooler = new TableSpooler();
            executorServiceForTableSpooler = new SafeScheduledExecutorService(1,
                    JdbcBaseRunnable.TABLE_JDBC_THREAD_PREFIX);
            executorServiceForTableSpooler.scheduleWithFixedDelay(tableSpooler, 0,
                    commonSourceConfigBean.newTableQueryInterval, TimeUnit.SECONDS);
        }

        while (!getContext().isStopped()) {
            checkWorkerStatus(completionService);
            final boolean shouldGenerate = tableOrderProvider.shouldGenerateNoMoreDataEvent();
            if (shouldGenerate) {
                final int delay = commonSourceConfigBean.noMoreDataEventDelay;
                if (delay > 0) {
                    Executors.newSingleThreadScheduledExecutor().schedule(new Runnable() {
                        @Override
                        public void run() {
                            jdbcUtil.generateNoMoreDataEvent(getContext());
                        }
                    }, delay, TimeUnit.SECONDS);
                } else {
                    jdbcUtil.generateNoMoreDataEvent(getContext());
                }
            }

            // This loop only checks isStopped(), so running it as fast as possible leads to high CPU
            // usage even when no data is passing through. We currently hard-code a sleep of a few milliseconds.
            try {
                Thread.sleep(100);
            } catch (InterruptedException e) {
                LOG.debug("Interrupted wait");
            }
        }

        for (Future future : allFutures) {
            try {
                future.get();
            } catch (ExecutionException e) {
                LOG.error(
                        "ExecutionException when attempting to wait for all table JDBC runnables to complete, after context was"
                                + " stopped: {}",
                        e.getMessage(), e);
            } catch (InterruptedException e) {
                LOG.error(
                        "InterruptedException when attempting to wait for all table JDBC runnables to complete, after context "
                                + "was stopped: {}",
                        e.getMessage(), e);
                Thread.currentThread().interrupt();
            }
        }
    } finally {
        if (shutdownExecutorIfNeeded()) {
            Thread.currentThread().interrupt();
        }
    }
}

From source file:delfos.rs.trustbased.WeightedGraph.java

private static void validateWeightMatrix(double[][] weightMatrix) {

    List<Double> wrongValues = IntStream.range(0, weightMatrix.length).boxed().map(row -> weightMatrix[row])
            .flatMap(row -> IntStream.range(0, row.length).boxed().map(column -> row[column]))
            .filter(value -> (value > 1.0) || (value < 0.0)).collect(Collectors.toList());

    if (!wrongValues.isEmpty()) {
        throw new IllegalStateException(
                "Values must be in the [0,1] interval but found: " + wrongValues.toString());
    }
}
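
The nested ranges above walk the matrix by row and column index. When the indices themselves are not needed, a sketch like the following (illustrative data) reaches the same values more directly via Arrays.stream:

import java.util.Arrays;

public class MatrixCheckDemo {
    public static void main(String[] args) {
        double[][] weights = { { 0.2, 0.8 }, { 1.5, 0.1 } }; // 1.5 is out of range
        boolean anyOutOfRange = Arrays.stream(weights)
                .flatMapToDouble(Arrays::stream)
                .anyMatch(v -> v > 1.0 || v < 0.0);
        System.out.println(anyOutOfRange); // true
    }
}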

From source file:org.everit.json.schema.loader.SchemaLoader.java

private ObjectSchema.Builder buildObjectSchema() {
    ObjectSchema.Builder builder = ObjectSchema.builder();
    ifPresent("minProperties", Integer.class, builder::minProperties);
    ifPresent("maxProperties", Integer.class, builder::maxProperties);
    if (schemaJson.has("properties")) {
        JSONObject propertyDefs = schemaJson.getJSONObject("properties");
        Arrays.stream(Optional.ofNullable(JSONObject.getNames(propertyDefs)).orElse(new String[0])).forEach(
                key -> builder.addPropertySchema(key, loadChild(propertyDefs.getJSONObject(key)).build()));
    }
    if (schemaJson.has("additionalProperties")) {
        typeMultiplexer("additionalProperties", schemaJson.get("additionalProperties")).ifIs(Boolean.class)
                .then(builder::additionalProperties).ifObject()
                .then(def -> builder.schemaOfAdditionalProperties(loadChild(def).build())).requireAny();
    }
    if (schemaJson.has("required")) {
        JSONArray requiredJson = schemaJson.getJSONArray("required");
        IntStream.range(0, requiredJson.length()).mapToObj(requiredJson::getString)
                .forEach(builder::addRequiredProperty);
    }
    if (schemaJson.has("patternProperties")) {
        JSONObject patternPropsJson = schemaJson.getJSONObject("patternProperties");
        String[] patterns = JSONObject.getNames(patternPropsJson);
        if (patterns != null) {
            for (String pattern : patterns) {
                builder.patternProperty(pattern, loadChild(patternPropsJson.getJSONObject(pattern)).build());
            }
        }
    }
    ifPresent("dependencies", JSONObject.class, deps -> addDependencies(builder, deps));
    return builder;
}

From source file:com.teradata.benchto.driver.loader.BenchmarkLoader.java

private List<Benchmark> filterFreshBenchmarks(List<Benchmark> benchmarks) {
    List<Benchmark> benchmarksWithFrequencySet = benchmarks.stream()
            .filter(benchmark -> benchmark.getFrequency().isPresent()).collect(toList());

    if (benchmarksWithFrequencySet.isEmpty()) {
        return ImmutableList.of();
    }

    List<String> benchmarkUniqueNames = benchmarksWithFrequencySet.stream()
            .map(benchmark -> benchmark.getUniqueName()).collect(toList());
    List<Duration> successfulExecutionAges = benchmarkServiceClient
            .getBenchmarkSuccessfulExecutionAges(benchmarkUniqueNames);

    return IntStream.range(0, benchmarksWithFrequencySet.size()).mapToObj(i -> {
        Benchmark benchmark = benchmarksWithFrequencySet.get(i);
        if (successfulExecutionAges.get(i).compareTo(benchmark.getFrequency().get()) <= 0) {
            return benchmark;
        } else {
            return null;
        }
    }).filter(benchmark -> benchmark != null).collect(toList());
}
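
Since the stream API has no built-in zip, the mapToObj call over IntStream.range above is the usual way to walk two parallel lists in lockstep by index. In isolation, with hypothetical lists:

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

public class ZipDemo {
    public static void main(String[] args) {
        List<String> names = Arrays.asList("slow-query", "fast-query");
        List<Integer> ageDays = Arrays.asList(10, 1);
        List<String> zipped = IntStream.range(0, names.size())
                .mapToObj(i -> names.get(i) + "=" + ageDays.get(i))
                .collect(Collectors.toList());
        System.out.println(zipped); // [slow-query=10, fast-query=1]
    }
}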

From source file:org.apache.nifi.processors.standard.util.TestJdbcCommon.java

@Test
public void testClob() throws Exception {
    try (final Statement stmt = con.createStatement()) {
        stmt.executeUpdate("CREATE TABLE clobtest (id INT, text CLOB(64 K))");
        stmt.execute("INSERT INTO clobtest VALUES (41, NULL)");
        PreparedStatement ps = con.prepareStatement("INSERT INTO clobtest VALUES (?, ?)");
        ps.setInt(1, 42);
        final char[] buffer = new char[4002];
        IntStream.range(0, 4002).forEach((i) -> buffer[i] = String.valueOf(i % 10).charAt(0));
        ReaderInputStream isr = new ReaderInputStream(new CharArrayReader(buffer), Charset.defaultCharset());

        // - set the value of the input parameter to the input stream
        ps.setAsciiStream(2, isr, 4002);
        ps.execute();
        isr.close();

        final ResultSet resultSet = stmt.executeQuery("select * from clobtest");

        final ByteArrayOutputStream outStream = new ByteArrayOutputStream();
        JdbcCommon.convertToAvroStream(resultSet, outStream, false);

        final byte[] serializedBytes = outStream.toByteArray();
        assertNotNull(serializedBytes);

        // Deserialize bytes to records
        final InputStream instream = new ByteArrayInputStream(serializedBytes);

        final DatumReader<GenericRecord> datumReader = new GenericDatumReader<>();
        try (final DataFileStream<GenericRecord> dataFileReader = new DataFileStream<>(instream, datumReader)) {
            GenericRecord record = null;
            while (dataFileReader.hasNext()) {
                // Reuse record object by passing it to next(). This saves us from
                // allocating and garbage collecting many objects for files with
                // many items.
                record = dataFileReader.next(record);
                Integer id = (Integer) record.get("ID");
                Object o = record.get("TEXT");
                if (id == 41) {
                    assertNull(o);
                } else {
                    assertNotNull(o);
                    assertEquals(4002, o.toString().length());
                }
            }
        }
    }
}

From source file:org.broadinstitute.gatk.tools.walkers.genotyper.afcalc.AlleleFrequencyCalculatorUnitTest.java

@Test
public void testManySamplesWithLowConfidence() {
    // prior corresponding to 1000 observations of ref, 1 of a SNP
    // for this test, we want many pseudocounts in the prior because the new AF calculator learns the allele frequency
    // and we don't want the complication of the posterior being different from the prior
    final AlleleFrequencyCalculator afCalc = new AlleleFrequencyCalculator(1000, 1, 1, DEFAULT_PLOIDY);
    final List<Allele> alleles = Arrays.asList(A, B);

    // for FAIRLY_CONFIDENT_PL = 20, this genotype has about 100 times greater likelihood to be het than hom ref
    // with our prior giving 1000 times as much weight to ref, this implies a 1 in 5 chance of each sample having a copy of the alt allele
    // (that is, 100/1000 times the combinatorial factor of 2).  Thus the MLE for up to 2 samples should be zero
    // for five samples we should have one
    // for ten samples we will have more than twice as many as for five since the counts from the samples start to influence
    // the estimated allele frequency
    final Genotype AB = genotypeWithObviousCall(DIPLOID, BIALLELIC, new int[] { 0, 1, 1, 1 },
            FAIRLY_CONFIDENT_PL);

    final List<VariantContext> vcsWithDifferentNumbersOfSamples = IntStream.range(1, 11)
            .mapToObj(n -> makeVC(alleles, Collections.nCopies(n, AB))).collect(Collectors.toList());
    final int[] counts = vcsWithDifferentNumbersOfSamples.stream()
            .mapToInt(vc -> afCalc.getLog10PNonRef(vc).getAlleleCountAtMLE(B)).toArray();
    Assert.assertEquals(counts[0], 0); // one sample
    Assert.assertEquals(counts[1], 0); // two samples
    Assert.assertEquals(counts[4], 2); // five samples
    Assert.assertTrue(counts[8] >= 3); // ten samples
}