Example usage for java.util.stream IntStream range

List of usage examples for java.util.stream IntStream range

Introduction

On this page you can find example usages of java.util.stream IntStream.range.

Prototype

public static IntStream range(int startInclusive, int endExclusive) 

Document

Returns a sequential ordered IntStream from startInclusive (inclusive) to endExclusive (exclusive) by an incremental step of 1.
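
As a quick illustration of that contract, here is a minimal self-contained sketch (not taken from any of the examples below) that prints the produced values and shows that the upper bound is excluded:

import java.util.Arrays;
import java.util.stream.IntStream;

public class RangeBasics {
    public static void main(String[] args) {
        // 0, 1, 2, 3, 4 -- endExclusive (5) is not part of the stream
        System.out.println(Arrays.toString(IntStream.range(0, 5).toArray()));

        // an empty stream is produced when startInclusive >= endExclusive
        System.out.println(IntStream.range(5, 5).count()); // prints 0
    }
}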

Usage

From source file:org.lambdamatic.analyzer.LambdaExpressionAnalyzer.java

/**
 * Returns the {@link SerializedLambdaInfo} for the given {@code expression}
 * @param expression the expression to analyze.
 * @return the corresponding {@link SerializedLambda}
 * @throws AnalyzeException if something wrong happened (a {@link NoSuchMethodException},
 *         {@link IllegalArgumentException} or {@link InvocationTargetException} exception
 *         occurred).
 * 
 * @see http://docs.oracle.com/javase/8/docs/api/java/lang/invoke/SerializedLambda.html
 * @see http://stackoverflow.com/questions/21860875/printing-debug-info-on-errors-with-java-8-lambda-expressions/21879031#21879031
 */
private static SerializedLambdaInfo getSerializedLambdaInfo(final Object expression) {
    final Class<?> cl = expression.getClass();
    try {
        final Method m = cl.getDeclaredMethod("writeReplace");
        m.setAccessible(true);
        final Object result = m.invoke(expression);
        if (result instanceof SerializedLambda) {
            final SerializedLambda serializedLambda = (SerializedLambda) result;
            LOGGER.debug(" Lambda FunctionalInterface: {}.{} ({})",
                    serializedLambda.getFunctionalInterfaceClass(),
                    serializedLambda.getFunctionalInterfaceMethodName(),
                    serializedLambda.getFunctionalInterfaceMethodSignature());
            LOGGER.debug(" Lambda Implementation: {}.{} ({})", serializedLambda.getImplClass(),
                    serializedLambda.getImplMethodName(), serializedLambda.getImplMethodSignature());
            IntStream.range(0, serializedLambda.getCapturedArgCount())
                    .forEach(i -> LOGGER.debug("  with Captured Arg(" + i + "): '"
                            + serializedLambda.getCapturedArg(i)
                            + ((serializedLambda.getCapturedArg(i) != null)
                                    ? "' (" + serializedLambda.getCapturedArg(i).getClass().getName() + ")"
                                    : "")));
            return new SerializedLambdaInfo(serializedLambda);
        }
    } catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException e) {
        throw new AnalyzeException("Failed to find the Serialized form for the given Lambda Expression", e);
    }
    return null;
}
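
The range call above only supplies the indices 0 .. getCapturedArgCount()-1 so that each captured argument can be fetched and logged by position. A minimal sketch of the same indexed-logging pattern over a plain list (the list contents and message format are illustrative, not from the source):

import java.util.List;
import java.util.stream.IntStream;

public class IndexedLogging {
    public static void main(String[] args) {
        List<Object> capturedArgs = List.of("first", 42);
        IntStream.range(0, capturedArgs.size())
                .forEach(i -> System.out.println("  with Captured Arg(" + i + "): '"
                        + capturedArgs.get(i) + "' (" + capturedArgs.get(i).getClass().getName() + ")"));
    }
}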

From source file:ai.grakn.test.engine.tasks.storage.TaskStateGraphStoreTest.java

@Test
public void testGetAllTaskStates() {
    int sizeBeforeAdding = stateStorage.getTasks(null, null, null, null, 0, 0).size();

    int numberTasks = 10;
    IntStream.range(0, numberTasks).mapToObj(i -> task()).forEach(stateStorage::newState);

    Set<TaskState> res = stateStorage.getTasks(null, null, null, null, 0, 0);
    assertEquals(sizeBeforeAdding + numberTasks, res.size());
}
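
Here IntStream.range(0, numberTasks) acts as a counted loop: mapToObj turns each index into a fresh object before it is stored. The same pattern with a plain string factory standing in for the test's task() helper (a sketch, not the project's API):

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

public class GenerateFixtures {
    public static void main(String[] args) {
        int numberTasks = 10;
        // create one placeholder object per index
        List<String> tasks = IntStream.range(0, numberTasks)
                .mapToObj(i -> "task-" + i)
                .collect(Collectors.toList());
        System.out.println(tasks.size()); // 10
    }
}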

From source file:org.csanchez.jenkins.plugins.kubernetes.KubernetesTestUtil.java

/**
 * Delete pods with matching labels
 * 
 * @param client
 * @param labels
 * @param wait
 *            wait some time for pods to finish
 * @return whether any pod was deleted
 * @throws Exception
 */
public static boolean deletePods(KubernetesClient client, Map<String, String> labels, boolean wait)
        throws Exception {

    if (client != null) {

        // wait for 90 seconds for all pods to be terminated
        if (wait) {
            LOGGER.log(INFO, "Waiting for pods to terminate");
            ForkJoinPool forkJoinPool = new ForkJoinPool(1);
            try {
                forkJoinPool.submit(() -> IntStream.range(1, 1_000_000).anyMatch(i -> {
                    try {
                        FilterWatchListDeletable<Pod, PodList, Boolean, Watch, Watcher<Pod>> pods = client
                                .pods().withLabels(labels);
                        LOGGER.log(INFO, "Still waiting for pods to terminate: {0}", print(pods));
                        boolean allTerminated = pods.list().getItems().isEmpty();
                        if (allTerminated) {
                            LOGGER.log(INFO, "All pods are terminated: {0}", print(pods));
                        } else {
                            LOGGER.log(INFO, "Still waiting for pods to terminate: {0}", print(pods));
                            Thread.sleep(5000);
                        }
                        return allTerminated;
                    } catch (InterruptedException e) {
                        LOGGER.log(INFO, "Waiting for pods to terminate - interrupted");
                        return true;
                    }
                })).get(90, TimeUnit.SECONDS);
            } catch (TimeoutException e) {
                LOGGER.log(INFO, "Waiting for pods to terminate - timed out");
                // job not done in interval
            }
        }

        FilterWatchListDeletable<Pod, PodList, Boolean, Watch, Watcher<Pod>> pods = client.pods()
                .withLabels(labels);
        if (!pods.list().getItems().isEmpty()) {
            LOGGER.log(WARNING, "Deleting leftover pods: {0}", print(pods));
            if (Boolean.TRUE.equals(pods.delete())) {
                return true;
            }

        }
    }
    return false;
}
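
The notable use of IntStream.range here is as a bounded polling loop: anyMatch keeps evaluating the lambda until it returns true (all pods terminated) or the very large upper bound is exhausted, while the surrounding ForkJoinPool submission and get(90, TimeUnit.SECONDS) provide the real timeout. A stripped-down sketch of that poll-until-true idiom (the termination condition is a stand-in, not the Kubernetes client call):

import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.IntStream;

public class PollUntilDone {
    public static void main(String[] args) {
        AtomicInteger remaining = new AtomicInteger(3); // pretend three pods are still running

        // anyMatch stops iterating as soon as the lambda returns true
        boolean done = IntStream.range(1, 1_000_000).anyMatch(attempt -> {
            boolean allTerminated = remaining.decrementAndGet() <= 0;
            System.out.println("attempt " + attempt + ", all terminated: " + allTerminated);
            return allTerminated;
        });
        System.out.println(done); // true after the third attempt
    }
}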

From source file:com.bwc.ora.models.Lrp.java

/**
 * Get an array containing the gray scale values of the LRP arranged from
 * smallest y-value to greatest y-value (i.e. top to bottom in the image).
 * The pixel data is taken from the transformed OCT with all appropriately
 * applied transformations. Each call to this method will go and grab the
 * pixel data allowing for updated intensity data to be grabbed immediately.
 *
 * @return the grayscale values of the LRP, ordered from top (smallest y) to bottom (greatest y) of the image
 */
private int[] getIntensityValues() {

    BufferedImage octToProcess = transformedOctImage == null ? oct.getTransformedOct() : transformedOctImage;
    int[] rgbArray = octToProcess.getRGB(x, y, width, height, null, 0, width);

    return IntStream.range(0, height)
            .map(scanY -> (int) Math.round(Arrays.stream(rgbArray, width * scanY, width * (scanY + 1))
                    .map(ImageUtils::calculateGrayScaleValue).average().orElse(0)))
            .toArray();
}
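
In this example IntStream.range walks the row indices of a flattened pixel array, and Arrays.stream(rgbArray, from, to) averages one row at a time. The same row-averaging pattern over a plain int[] (the data is made up, and ImageUtils::calculateGrayScaleValue is replaced by using the raw values directly):

import java.util.Arrays;
import java.util.stream.IntStream;

public class RowAverages {
    public static void main(String[] args) {
        int width = 3;
        int height = 2;
        // a 2x3 image flattened row by row
        int[] pixels = {10, 20, 30, 40, 50, 60};

        int[] rowAverages = IntStream.range(0, height)
                .map(row -> (int) Math.round(
                        Arrays.stream(pixels, width * row, width * (row + 1)).average().orElse(0)))
                .toArray();
        System.out.println(Arrays.toString(rowAverages)); // [20, 50]
    }
}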

From source file:com.github.mrenou.jacksonatic.internal.AnnotatedClassLogger.java

private static void logParameters(StringBuilder sb, AnnotatedWithParams annotatedWithParams) {
    IntStream
            .range(0, annotatedWithParams.getParameterCount()).filter(
                    index -> hasAnnotation(annotatedWithParams.getParameterAnnotations(index)))
            .forEach(index -> sb
                    .append(" > p" + index + ": "
                            + annotationsItToStr(annotatedWithParams.getParameterAnnotations(index)))
                    .append(ln));
}
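
Here the range supplies parameter positions so that only annotated parameters are logged: range, then filter by a predicate on the index, then forEach. A minimal sketch of the same select-by-index pattern over an array (the "annotated" check is faked with a simple length predicate):

import java.util.stream.IntStream;

public class FilteredIndices {
    public static void main(String[] args) {
        String[] parameterNames = {"id", "name", "createdAt", "flags"};

        // keep only the positions whose element matches the (made-up) condition
        IntStream.range(0, parameterNames.length)
                .filter(index -> parameterNames[index].length() > 4)
                .forEach(index -> System.out.println(" > p" + index + ": " + parameterNames[index]));
        // prints p2 (createdAt) and p3 (flags)
    }
}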

From source file:org.apache.hadoop.hbase.client.example.AsyncClientExample.java

@Override
public int run(String[] args) throws Exception {
    if (args.length < 1 || args.length > 2) {
        System.out.println("Usage: " + this.getClass().getName() + " tableName [num_operations]");
        return -1;
    }
    TableName tableName = TableName.valueOf(args[0]);
    int numOps = args.length > 1 ? Integer.parseInt(args[1]) : DEFAULT_NUM_OPS;
    ExecutorService threadPool = Executors.newFixedThreadPool(THREAD_POOL_SIZE,
            Threads.newDaemonThreadFactory("AsyncClientExample"));
    // We use AsyncTable here so we need to provide a separate thread pool. RawAsyncTable does not
    // need a thread pool and may perform better if you use it correctly, as it can save some
    // context switches. But if you use RawAsyncTable incorrectly, you can hurt performance badly,
    // so use it with caution.
    CountDownLatch latch = new CountDownLatch(numOps);
    IntStream.range(0, numOps).forEach(i -> {
        CompletableFuture<AsyncConnection> future = getConn();
        addListener(future, (conn, error) -> {
            if (error != null) {
                LOG.warn("failed to get async connection for " + i, error);
                latch.countDown();
                return;
            }
            AsyncTable<?> table = conn.getTable(tableName, threadPool);
            addListener(table.put(new Put(getKey(i)).addColumn(FAMILY, QUAL, Bytes.toBytes(i))),
                    (putResp, putErr) -> {
                        if (putErr != null) {
                            LOG.warn("put failed for " + i, putErr);
                            latch.countDown();
                            return;
                        }
                        LOG.info("put for " + i + " succeeded, try getting");
                        addListener(table.get(new Get(getKey(i))), (result, getErr) -> {
                            if (getErr != null) {
                                LOG.warn("get failed for " + i);
                                latch.countDown();
                                return;
                            }
                            if (result.isEmpty()) {
                                LOG.warn("get failed for " + i + ", server returns empty result");
                            } else if (!result.containsColumn(FAMILY, QUAL)) {
                                LOG.warn("get failed for " + i + ", the result does not contain "
                                        + Bytes.toString(FAMILY) + ":" + Bytes.toString(QUAL));
                            } else {
                                int v = Bytes.toInt(result.getValue(FAMILY, QUAL));
                                if (v != i) {
                                    LOG.warn("get failed for " + i + ", the value of " + Bytes.toString(FAMILY)
                                            + ":" + Bytes.toString(QUAL) + " is " + v + ", exected " + i);
                                } else {
                                    LOG.info("get for " + i + " succeeded");
                                }
                            }
                            latch.countDown();
                        });
                    });
        });
    });
    latch.await();
    closeConn().get();
    return 0;
}
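
IntStream.range(0, numOps).forEach fans out one asynchronous operation per index, and the CountDownLatch lets the caller wait until every callback has fired. A condensed sketch of that fan-out-and-wait shape using CompletableFuture instead of the HBase client (all names and values here are illustrative):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CountDownLatch;
import java.util.stream.IntStream;

public class FanOutAndWait {
    public static void main(String[] args) throws InterruptedException {
        int numOps = 5;
        CountDownLatch latch = new CountDownLatch(numOps);

        IntStream.range(0, numOps).forEach(i ->
                CompletableFuture.supplyAsync(() -> i * i)
                        .whenComplete((result, error) -> {
                            System.out.println("op " + i + " -> " + result);
                            latch.countDown();
                        }));

        latch.await(); // block until all numOps callbacks have completed
        System.out.println("all operations finished");
    }
}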

From source file:org.lightjason.agentspeak.action.buildin.graph.CAdjacencyMatrix.java

/**
 * converts a graph into an adjacency matrix
 *
 * @param p_graph graph
 * @param p_cost map with edges and costs
 * @param p_defaultcost default cost value (on non-existing map values)
 * @param p_type matrix type
 * @return pair of double matrix and vertices
 */
private static Pair<DoubleMatrix2D, Collection<?>> apply(final AbstractGraph<Object, Object> p_graph,
        final Map<?, Number> p_cost, final double p_defaultcost, final EType p_type) {
    // index map for matching vertex to index position within matrix
    final Map<Object, Integer> l_index = new HashMap<>();

    // extract vertices from edges
    p_graph.getEdges().stream().map(p_graph::getEndpoints).flatMap(i -> Stream.of(i.getFirst(), i.getSecond()))
            .forEach(i -> l_index.putIfAbsent(i, 0));

    // define for each vertex an index number in [0, size)
    StreamUtils.zip(l_index.keySet().stream(), IntStream.range(0, l_index.size()).boxed(), ImmutablePair::new)
            .forEach(i -> l_index.put(i.getKey(), i.getValue()));

    final DoubleMatrix2D l_matrix;
    switch (p_type) {
    case SPARSE:
        l_matrix = new SparseDoubleMatrix2D(l_index.size(), l_index.size());
        break;

    default:
        l_matrix = new DenseDoubleMatrix2D(l_index.size(), l_index.size());
    }

    // map costs to matrix
    p_graph.getEdges().stream()
            .map(i -> new ImmutablePair<>(p_graph.getEndpoints(i),
                    p_cost.getOrDefault(i, p_defaultcost).doubleValue()))
            .forEach(i -> l_matrix.setQuick(l_index.get(i.getLeft().getFirst()),
                    l_index.get(i.getLeft().getSecond()),
                    i.getRight() + l_matrix.getQuick(l_index.get(i.getLeft().getFirst()),
                            l_index.get(i.getLeft().getSecond()))));

    return new ImmutablePair<>(l_matrix, new ArrayList<>(l_index.keySet()));
}
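
The StreamUtils.zip call (an external helper library) pairs every vertex with an index produced by IntStream.range(0, size).boxed(), giving each vertex its row/column position in the matrix. Without that helper, the same index assignment can be sketched with range alone (names and data are illustrative):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.IntStream;

public class VertexIndexing {
    public static void main(String[] args) {
        List<String> vertices = new ArrayList<>(List.of("a", "b", "c"));
        Map<String, Integer> index = new HashMap<>();

        // give every vertex a position in [0, size)
        IntStream.range(0, vertices.size()).forEach(i -> index.put(vertices.get(i), i));
        System.out.println(index); // e.g. {a=0, b=1, c=2} (HashMap iteration order is not guaranteed)
    }
}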

From source file:org.lightjason.agentspeak.consistency.CConsistency.java

@Override
public final IConsistency call() throws Exception {
    if (m_data.size() < 2)
        return this;

    // get key list of map for addressing elements in the correct order
    final ArrayList<IAgent<?>> l_keys = new ArrayList<>(m_data.keySet());

    // calculate markov chain transition matrix
    final DoubleMatrix2D l_matrix = new DenseDoubleMatrix2D(m_data.size(), m_data.size());
    IntStream.range(0, l_keys.size()).parallel().boxed().forEach(i -> {

        final IAgent<?> l_item = l_keys.get(i);
        IntStream.range(i + 1, l_keys.size()).boxed().forEach(j -> {

            final double l_value = this.getMetricValue(l_item, l_keys.get(j));
            l_matrix.setQuick(i, j, l_value);
            l_matrix.setQuick(j, i, l_value);

        });

        // row-wise normalization for getting probabilities
        final double l_norm = Algebra.DEFAULT.norm1(l_matrix.viewRow(i));
        if (l_norm != 0)
            l_matrix.viewRow(i).assign(Mult.div(l_norm));

        // set epsilon slope for preventing periodic markov chains
        l_matrix.setQuick(i, i, m_epsilon);
    });

    // check for a zero-matrix
    final DoubleMatrix1D l_eigenvector = l_matrix.zSum() <= m_data.size() * m_epsilon
            ? new DenseDoubleMatrix1D(m_data.size())
            : m_algorithm.getStationaryDistribution(m_iteration, l_matrix);

    // calculate the inverted probability and normalize with 1-norm
    l_eigenvector.assign(PROBABILITYINVERT);
    l_eigenvector.assign(Functions.div(Algebra.DEFAULT.norm1(l_eigenvector)));

    // set consistency value for each entry and update statistic
    m_statistic.clear();
    IntStream.range(0, l_keys.size()).boxed().forEach(i -> {
        m_statistic.addValue(l_eigenvector.get(i));
        m_data.put(l_keys.get(i), l_eigenvector.get(i));
    });

    return this;
}
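
Two nested ranges drive the matrix construction: the outer range walks the rows (in parallel), and the inner range(i + 1, size) visits only the upper triangle so each symmetric pair is computed once and written to both (i, j) and (j, i). A small sketch of that triangular fill over a plain double[][] (the metric is a stand-in and the parallel() call is omitted):

import java.util.Arrays;
import java.util.stream.IntStream;

public class SymmetricFill {
    public static void main(String[] args) {
        int size = 4;
        double[][] matrix = new double[size][size];

        IntStream.range(0, size).forEach(i ->
                IntStream.range(i + 1, size).forEach(j -> {
                    double value = Math.abs(i - j); // stand-in for the real metric
                    matrix[i][j] = value;
                    matrix[j][i] = value;
                }));

        Arrays.stream(matrix).map(Arrays::toString).forEach(System.out::println);
    }
}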

From source file:org.ballerinalang.langserver.command.testgen.renderer.TemplateBasedRendererOutput.java

private void computeFocusPosition(String placeHolderName, String newContent) {
    if (PlaceHolder.CONTENT.getName().equals(placeHolderName) && focusLineAcceptor != null) {
        String[] lines = newContent.split(CommonUtil.LINE_SEPARATOR_SPLIT);
        IntStream.range(0, lines.length).filter(i -> lines[i].contains("function " + focusFunctionName))
                .findFirst().ifPresent(i -> {
                    focusLineAcceptor.accept(i - 2 < 0 ? 0 : i - 2, 0);
                });
    }
}
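
Here range(0, lines.length) produces candidate line indices, filter keeps those whose text contains the function name, and findFirst yields the first match as an OptionalInt. A minimal sketch of finding the index of the first matching line (sample data only):

import java.util.OptionalInt;
import java.util.stream.IntStream;

public class FirstMatchingLine {
    public static void main(String[] args) {
        String[] lines = {"import ballerina/test;", "", "function testFoo() {", "}"};

        OptionalInt match = IntStream.range(0, lines.length)
                .filter(i -> lines[i].contains("function testFoo"))
                .findFirst();
        match.ifPresent(i -> System.out.println("found at line index " + i)); // 2
    }
}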

From source file:org.lightjason.agentspeak.action.builtin.graph.CAdjacencyMatrix.java

/**
 * converts a graph into an adjacency matrix
 *
 * @param p_graph graph
 * @param p_cost map with edges and costs
 * @param p_defaultcost default cost value (on non-existing map values)
 * @param p_type matrix type
 * @return pair of double matrix and vertices
 */
private static Pair<DoubleMatrix2D, Collection<?>> apply(@Nonnull final Graph<Object, Object> p_graph,
        @Nonnull final Map<?, Number> p_cost, final double p_defaultcost, @Nonnull final EType p_type) {
    // index map for matching vertex to index position within matrix
    final Map<Object, Integer> l_index = new HashMap<>();

    // extract vertices from edges
    p_graph.getEdges().stream().map(p_graph::getEndpoints).flatMap(i -> Stream.of(i.getFirst(), i.getSecond()))
            .forEach(i -> l_index.putIfAbsent(i, 0));

    // define for each vertex an index number in [0, size)
    StreamUtils.zip(l_index.keySet().stream(), IntStream.range(0, l_index.size()).boxed(), ImmutablePair::new)
            .forEach(i -> l_index.put(i.getKey(), i.getValue()));

    final DoubleMatrix2D l_matrix;
    switch (p_type) {
    case SPARSE:
        l_matrix = new SparseDoubleMatrix2D(l_index.size(), l_index.size());
        break;

    default:
        l_matrix = new DenseDoubleMatrix2D(l_index.size(), l_index.size());
    }

    // map costs to matrix
    p_graph.getEdges().stream()
            .map(i -> new ImmutablePair<>(p_graph.getEndpoints(i),
                    p_cost.getOrDefault(i, p_defaultcost).doubleValue()))
            .forEach(i -> l_matrix.setQuick(l_index.get(i.getLeft().getFirst()),
                    l_index.get(i.getLeft().getSecond()),
                    i.getRight() + l_matrix.getQuick(l_index.get(i.getLeft().getFirst()),
                            l_index.get(i.getLeft().getSecond()))));

    // on undirected graphs, add the transpose so that each edge's cost is applied in both directions
    if (p_graph instanceof UndirectedGraph<?, ?>)
        l_matrix.assign(IAlgebra.ALGEBRA.transpose(l_matrix).copy(), Functions.plus);

    return new ImmutablePair<>(l_matrix, new ArrayList<>(l_index.keySet()));
}