Example usage for java.util.stream IntStream range

List of usage examples for java.util.stream IntStream range

Introduction

On this page you can find example usage for java.util.stream IntStream range.

Prototype

public static IntStream range(int startInclusive, int endExclusive) 

Document

Returns a sequential ordered IntStream from startInclusive (inclusive) to endExclusive (exclusive) by an incremental step of 1.
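
A minimal standalone sketch of the method itself (the class name is made up for illustration):

import java.util.stream.IntStream;

public class IntStreamRangeDemo {
    public static void main(String[] args) {
        // Prints 0 1 2 3 4 -- the end bound 5 is exclusive.
        IntStream.range(0, 5).forEach(i -> System.out.print(i + " "));
        System.out.println();

        // range(1, 11) yields 1 through 10, so this sums 1..10.
        System.out.println(IntStream.range(1, 11).sum()); // 55
    }
}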

Usage

From source file:com.github.viktornar.task.PrintTask.java

private void executeCommand(Atlas atlas) {
    createAtlasFolder(atlas.getAtlasFolder());

    Collection<Future<?>> futures = new LinkedList<>();
    IntStream.range(1, atlas.getRows() + 1).forEachOrdered(row -> {
        IntStream.range(1, atlas.getColumns() + 1).forEachOrdered(column -> {
            futures.add(executorService.submit(() -> {
                final Process process;
                try {
                    logger.info(format(" [x] Start printing job [row:'%s', column: '%s'] : '%s'", row, column,
                            atlas.toString()));
                    Extent pageExtent = getExtentOfPage(atlas, column, row);
                    Atlas atlasPage = new Atlas();
                    atlasPage.copyBean(atlas);
                    atlasPage.setExtent(pageExtent);
                    logger.info(format(" [x] Printing job command: '%s'", getCommand(atlasPage, row, column)));
                    process = Runtime.getRuntime().exec(getCommand(atlasPage, row, column));
                    process.waitFor();
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }));
        });
    });

    futures.forEach(future -> {
        try {
            future.get();
            Atlas _atlas = repository.getAtlasById(atlas.getId());
            _atlas.setProgress(_atlas.getProgress() + 1);
            repository.updateAtlas(_atlas);
            logger.info(format(" [x] Finished printing job: '%s'", atlas.toString()));
            Thread.sleep(1000); // Sleep for 1 s so the printing progress update completes
        } catch (InterruptedException | ExecutionException e) {
            logger.error(format(" [x] Error on printing job: '%s'", atlas.toString()));
            throw new RuntimeException(e);
        }
    });

    mergePages(atlas.getAtlasFolder(), format("%s.pdf", atlas.getAtlasName()));
}
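
The snippet drives a two-dimensional traversal with nested range calls, using range(1, n + 1) to get 1-based row and column indices. A minimal sketch of just that traversal pattern (grid dimensions are made up):

import java.util.stream.IntStream;

public class GridTraversalSketch {
    public static void main(String[] args) {
        int rows = 3, columns = 4; // made-up dimensions
        // range(1, n + 1) yields the 1-based indices 1..n.
        IntStream.range(1, rows + 1).forEachOrdered(row ->
                IntStream.range(1, columns + 1).forEachOrdered(column ->
                        System.out.printf("cell [%d, %d]%n", row, column)));
    }
}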

From source file:org.deeplearning4j.examples.cifar.NearestNeighbor.java

@NotNull
private static Map<INDArray, Byte> readTrainingData() throws IOException {
    log.info("Reading training data.");
    Map<INDArray, Byte> trainingMap = new HashMap<>();
    CifarLoader cifarLoader = new CifarLoader(true);
    byte[] trainingImageData = IOUtils.toByteArray(cifarLoader.getInputStream());
    int imageLen = HEIGHT * WIDTH * CHANNELS;
    for (int imageIndex = 0; imageIndex < trainingImageData.length; imageIndex += (imageLen + 1)) {
        final byte[] imageByteArray = Arrays.copyOfRange(trainingImageData, imageIndex + 1,
                imageIndex + (imageLen + 1));
        final double[] imageDoubles = IntStream.range(0, imageByteArray.length)
                .mapToDouble(idx -> imageByteArray[idx]).toArray();
        trainingMap.put(abs(Nd4j.create(imageDoubles)), trainingImageData[imageIndex]);
    }
    log.info("Training data read.");
    return trainingMap;
}
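
The imageDoubles line shows a common conversion idiom: range supplies the indices, and mapToDouble reads and widens each array element. Standalone, with made-up sample bytes:

import java.util.Arrays;
import java.util.stream.IntStream;

public class ByteToDoubleSketch {
    public static void main(String[] args) {
        byte[] bytes = {1, 2, 127, -128}; // made-up sample data
        double[] doubles = IntStream.range(0, bytes.length)
                .mapToDouble(idx -> bytes[idx]).toArray();
        System.out.println(Arrays.toString(doubles)); // [1.0, 2.0, 127.0, -128.0]
    }
}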

From source file:org.apache.hadoop.hbase.client.TestAsyncNonMetaRegionLocatorConcurrenyLimit.java

@BeforeClass
public static void setUp() throws Exception {
    Configuration conf = TEST_UTIL.getConfiguration();
    conf.set(REGION_COPROCESSOR_CONF_KEY, CountingRegionObserver.class.getName());
    conf.setInt(MAX_CONCURRENT_LOCATE_REQUEST_PER_TABLE, MAX_ALLOWED);
    TEST_UTIL.startMiniCluster(3);
    TEST_UTIL.getAdmin().setBalancerRunning(false, true);
    AsyncRegistry registry = AsyncRegistryFactory.getRegistry(TEST_UTIL.getConfiguration());
    CONN = new AsyncConnectionImpl(TEST_UTIL.getConfiguration(), registry, registry.getClusterId().get(),
            User.getCurrent());
    LOCATOR = new AsyncNonMetaRegionLocator(CONN);
    SPLIT_KEYS = IntStream.range(1, 256).mapToObj(i -> Bytes.toBytes(String.format("%02x", i)))
            .toArray(byte[][]::new);
    TEST_UTIL.createTable(TABLE_NAME, FAMILY, SPLIT_KEYS);
    TEST_UTIL.waitTableAvailable(TABLE_NAME);
}
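
The SPLIT_KEYS assignment turns a range of ints into a typed array: mapToObj formats each value, and toArray(byte[][]::new) collects the results. A sketch without the HBase dependencies, using String.getBytes in place of Bytes.toBytes:

import java.util.stream.IntStream;

public class SplitKeySketch {
    public static void main(String[] args) {
        byte[][] splitKeys = IntStream.range(1, 256)
                .mapToObj(i -> String.format("%02x", i).getBytes())
                .toArray(byte[][]::new);
        System.out.println(splitKeys.length);         // 255
        System.out.println(new String(splitKeys[0])); // 01
    }
}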

From source file:edu.washington.gs.skyline.model.quantification.GroupComparisonDataSet.java

public LinearFitResult calculateFoldChange(String label) {
    List<Replicate> replicates = removeIncompleteReplicates(label, this.replicates);
    if (replicates.size() == 0) {
        return null;
    }
    List<Replicate> summarizedRows;
    if (replicates.stream().anyMatch(row -> null != row.getBioReplicate())) {
        Map<Pair<Boolean, Object>, List<Replicate>> groupedByBioReplicate = replicates.stream().collect(
                Collectors.groupingBy(replicate -> Pair.of(replicate.isControl(), replicate.bioReplicate)));
        summarizedRows = new ArrayList<>();
        for (Map.Entry<Pair<Boolean, Object>, List<Replicate>> entry : groupedByBioReplicate.entrySet()) {
            Double log2Abundance = calculateMean(entry.getValue().stream()
                    .map(replicateData -> replicateData.getLog2Abundance(label)).collect(Collectors.toList()));
            if (log2Abundance == null) {
                continue;
            }
            Replicate combinedReplicate = new Replicate(entry.getKey().getLeft(), entry.getKey().getValue());
            ResultFileData resultFileData = combinedReplicate.ensureResultFileData();
            resultFileData.setTransitionAreas(label,
                    TransitionAreas.fromMap(Collections.singletonMap("", Math.pow(2.0, log2Abundance))));
            if (getNormalizationMethod() instanceof NormalizationMethod.RatioToLabel) {
                TransitionAreas denominator = TransitionAreas.fromMap(Collections.singletonMap("", 1.0));
                resultFileData.setTransitionAreas(
                        ((NormalizationMethod.RatioToLabel) getNormalizationMethod()).getIsotopeLabelTypeName(),
                        denominator);
            }
            summarizedRows.add(combinedReplicate);
        }
    } else {
        summarizedRows = replicates;
    }

    List<Double> abundances = summarizedRows.stream()
            .map(replicateData -> replicateData.getLog2Abundance(label)).collect(Collectors.toList());
    List<Integer> features = Collections.nCopies(summarizedRows.size(), 0);
    List<Integer> runs = IntStream.range(0, summarizedRows.size()).boxed().collect(Collectors.toList());
    List<Integer> subjects = IntStream.range(0, summarizedRows.size()).boxed().collect(Collectors.toList());
    List<Boolean> subjectControls = summarizedRows.stream().map(Replicate::isControl)
            .collect(Collectors.toList());
    FoldChangeDataSet foldChangeDataSet = new FoldChangeDataSet(abundances, features, runs, subjects,
            subjectControls);
    DesignMatrix designMatrix = DesignMatrix.getDesignMatrix(foldChangeDataSet, false);
    LinearFitResult linearFitResult = designMatrix.performLinearFit().get(0);
    return linearFitResult;
}
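
The runs and subjects lists use the standard idiom for materializing the indices 0..n-1 as a List<Integer>: boxed() converts the IntStream to a Stream<Integer> so it can be collected. In isolation:

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

public class IndexListSketch {
    public static void main(String[] args) {
        int n = 5; // made-up size
        List<Integer> indices = IntStream.range(0, n).boxed().collect(Collectors.toList());
        System.out.println(indices); // [0, 1, 2, 3, 4]
    }
}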

From source file:io.yields.math.concepts.operator.Smoothness.java

private boolean isConstant(RealMatrix matrix) {
    double refElement = matrix.getEntry(0, 0);
    // The matrix is constant if no entry deviates from the first entry by more than EPS.
    return IntStream.range(0, matrix.getRowDimension()).boxed()
            .flatMap(row -> IntStream.range(0, matrix.getColumnDimension())
                    .mapToObj(column -> matrix.getEntry(row, column)))
            .noneMatch(element -> Math.abs(refElement - element) > EPS);
}
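
Note that noneMatch short-circuits at the first deviating entry, expressing the predicate directly instead of negating a filter/findAny/isPresent chain.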

From source file:io.yields.math.framework.kpi.ExplorerCsvExporter.java

@Override
public void export(Explorer<?> explorer, File destinationFile) {

    try {

        // variables names
        String variablesHeader = explorer
                .all().findFirst().map(propertyVerification -> propertyVerification.getVariables().entrySet()
                        .stream().map(entry -> escape(entry.getKey())).sorted())
                .get().reduce("", commaSeparated());

        // properties result
        String propertiesHeader = explorer.all().findFirst()
                .map(propertyVerifications -> propertyVerifications.getResults().stream()
                        .map(propertyVerification -> PROPERTY_PREFIX + escape(propertyVerification.getName()))
                        .reduce("", commaSeparated()))
                .orElse("");

        // stats
        String statsHeader = explorer.getStats().stream().findFirst().map(
                stats -> stats.getStatsNames().stream().map(name -> escape(name)).reduce("", commaSeparated()))
                .orElse("");

        // descriptors
        String descriptorsHeader = explorer.getStats().stream().findFirst().map(stats -> stats
                .getDescriptorNames().stream().map(name -> escape(name)).reduce("", commaSeparated()))
                .orElse("");

        // variables content
        List<String> variablesContent = explorer.all().map(variablesToCsv()).collect(toList());

        // properties content
        List<String> propertiesContent = explorer.all().map(propertiesToCsv()).collect(toList());

        // stats content
        List<String> statsContent = explorer.getStats().stream().map(statsToCsv()).collect(toList());

        // descriptors content
        List<String> descriptorsContent = explorer.getStats().stream().map(descriptorsToCsv())
                .collect(toList());

        String header = "";

        header += variablesHeader;
        if (isNotBlank(propertiesHeader)) {
            header += "," + propertiesHeader;
        }

        if (isNotBlank(statsHeader)) {
            header += "," + statsHeader;
        }

        if (isNotBlank(descriptorsHeader)) {
            header += "," + descriptorsHeader;
        }

        try (PrintWriter fileWriter = new PrintWriter(new FileWriter(destinationFile))) {
            fileWriter.print(header);

            IntStream.range(0, variablesContent.size()).mapToObj(index -> {
                String line = variablesContent.get(index);
                if (!propertiesContent.isEmpty()) {
                    line += "," + propertiesContent.get(index);
                }
                if (!statsContent.isEmpty()) {
                    if (isNotBlank(statsContent.get(index))) {
                        line += "," + statsContent.get(index);
                    }
                }
                if (!descriptorsContent.isEmpty()) {
                    if (isNotBlank(descriptorsContent.get(index))) {
                        line += "," + descriptorsContent.get(index);
                    }
                }
                return line;
            }).forEach(line -> {
                fileWriter.println();
                fileWriter.print(line);
            });

        }

    } catch (IOException e) {
        throw new IllegalStateException(
                format("Failed to write data export to %s", destinationFile.getAbsolutePath()), e);
    }
}
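
The writer pipeline uses range(0, n) to zip several parallel lists by a shared index, concatenating same-position rows into one CSV line. The zip-by-index pattern reduced to its essentials (list contents are made up):

import java.util.Arrays;
import java.util.List;
import java.util.stream.IntStream;

public class ZipByIndexSketch {
    public static void main(String[] args) {
        List<String> left = Arrays.asList("a", "b", "c");  // made-up column values
        List<String> right = Arrays.asList("1", "2", "3");
        IntStream.range(0, left.size())
                .mapToObj(i -> left.get(i) + "," + right.get(i))
                .forEach(System.out::println); // a,1  b,2  c,3
    }
}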

From source file:it.greenvulcano.gvesb.gviamx.service.internal.PasswordResetManager.java

public void createPasswordResetRequest(String email) throws UserNotFoundException, UnverifiableUserException {

    User user = usersManager.getUser(email.toLowerCase());

    if (user.getPassword().isPresent()) {

        PasswordResetRequest passwordResetRequest = repository
                .get(email.toLowerCase(), PasswordResetRequest.class).orElseGet(PasswordResetRequest::new);
        passwordResetRequest.setUser((UserJPA) user);
        passwordResetRequest.setEmail(email.toLowerCase());
        passwordResetRequest.setIssueTime(new Date());
        passwordResetRequest.setExpireTime(expireTime);
        passwordResetRequest.setNotificationStatus(NotificationStatus.PENDING);

        byte[] token = new byte[4];
        secureRandom.nextBytes(token);

        String clearTextToken = String.format(Locale.US, "%02x%02x%02x%02x",
                IntStream.range(0, token.length).mapToObj(i -> Byte.valueOf(token[i])).toArray());
        passwordResetRequest.setToken(DigestUtils.sha256Hex(clearTextToken));

        repository.add(passwordResetRequest);

        passwordResetRequest.setClearToken(clearTextToken);
        notificationServices.stream().map(
                n -> new NotificationManager.NotificationTask(n, passwordResetRequest, repository, "reset"))
                .forEach(executor::submit);

    } else {
        throw new UnverifiableUserException(email);
    }
}
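
The token-formatting call deserves a closer look: mapToObj boxes each byte, toArray() yields an Object[], and String.format spreads that array across its four %02x specifiers (the Formatter treats a negative Byte as its unsigned value, so -1 prints as ff). Standalone:

import java.security.SecureRandom;
import java.util.Locale;
import java.util.stream.IntStream;

public class HexTokenSketch {
    public static void main(String[] args) {
        byte[] token = new byte[4];
        new SecureRandom().nextBytes(token);
        // toArray() returns Object[], which format() spreads across the %02x slots.
        String clearTextToken = String.format(Locale.US, "%02x%02x%02x%02x",
                IntStream.range(0, token.length).mapToObj(i -> Byte.valueOf(token[i])).toArray());
        System.out.println(clearTextToken); // e.g. 9f03c2a7
    }
}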

From source file:com.github.blindpirate.gogradle.util.StringUtils.java

public static Stream<String> eachSubPath(String packagePath) {
    Path path = Paths.get(packagePath);
    return IntStream.range(0, path.getNameCount()).mapToObj(i -> path.subpath(0, i + 1))
            .map(StringUtils::toUnixString);
}
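
Here range(0, path.getNameCount()) enumerates prefix lengths and subpath(0, i + 1) grows the path one name element at a time, so (assuming toUnixString joins segments with '/') eachSubPath("a/b/c") yields "a", "a/b", "a/b/c". The same idea with plain Path.toString:

import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.stream.IntStream;

public class SubPathSketch {
    public static void main(String[] args) {
        Path path = Paths.get("a/b/c");
        IntStream.range(0, path.getNameCount())
                .mapToObj(i -> path.subpath(0, i + 1).toString())
                .forEach(System.out::println); // a, a/b, a/b/c (backslashes on Windows)
    }
}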

From source file:com.pinterest.rocksplicator.controller.tasks.AddHostTask.java

@Override
public void process(Context ctx) throws Exception {
    final String clusterName = ctx.getCluster();
    final String hdfsDir = getParameter().getHdfsDir();
    final HostBean hostToAdd = getParameter().getHostToAdd();
    final int rateLimitMbs = getParameter().getRateLimitMbs();
    final Admin.Client client = clientFactory.getClient(hostToAdd);

    // 1) ping the host to add to make sure it's up and running.
    try {
        client.ping();
        // continue if #ping() succeeds.
    } catch (TException tex) {
        ctx.getTaskQueue().failTask(ctx.getId(), "Host to add is not alive!");
        return;
    }

    ClusterBean clusterBean = ZKUtil.getClusterConfig(zkClient, clusterName);
    if (clusterBean == null) {
        ctx.getTaskQueue().failTask(ctx.getId(), "Failed to read cluster config from zookeeper.");
        return;
    }

    for (SegmentBean segment : clusterBean.getSegments()) {
        // 2) find shards to serve for new host
        Set<Integer> shardToServe = IntStream.range(0, segment.getNumShards()).boxed()
                .collect(Collectors.toSet());
        for (HostBean host : segment.getHosts()) {
            // ignore hosts in different AZ than the new host
            if (host.getAvailabilityZone().equals(hostToAdd.getAvailabilityZone())) {
                host.getShards().forEach(shard -> shardToServe.remove(shard.getId()));
            }
        }

        // 3) upload shard data to the new host
        try {
            for (int shardId : shardToServe) {
                HostBean upstream = findMasterShard(shardId, segment.getHosts());
                if (upstream == null) {
                    //TODO: should we fail the task in this case?
                    LOG.error("Failed to find master shard for segment={}, shardId={}", segment.getName(),
                            shardId);
                    continue;
                }
                Admin.Client upstreamClient = clientFactory.getClient(upstream);
                String dbName = ShardUtil.getDBNameFromSegmentAndShardId(segment.getName(), shardId);
                String hdfsPath = ShardUtil.getHdfsPath(hdfsDir, clusterName, segment.getName(), shardId,
                        upstream.getIp(), getCurrentDateTime());

                upstreamClient.backupDB(new BackupDBRequest(dbName, hdfsPath).setLimit_mbs(rateLimitMbs));
                LOG.info("Backed up {} from {} to {}.", dbName, upstream.getIp(), hdfsPath);

                client.restoreDB(
                        new RestoreDBRequest(dbName, hdfsPath, upstream.getIp(), (short) upstream.getPort())
                                .setLimit_mbs(rateLimitMbs));
                LOG.info("Restored {} from {} to {}.", dbName, hdfsPath, hostToAdd.getIp());
            }
        } catch (TException ex) {
            String errMsg = String.format("Failed to upload shard data to host %s.", hostToAdd.getIp());
            LOG.error(errMsg, ex);
            ctx.getTaskQueue().failTask(ctx.getId(), errMsg);
            return;
        }

        // add shard config to new host
        hostToAdd.setShards(shardToServe.stream().map(id -> new ShardBean().setId(id).setRole(Role.SLAVE))
                .collect(Collectors.toList()));
        List<HostBean> newHostList = segment.getHosts();
        newHostList.add(hostToAdd);
        segment.setHosts(newHostList);
    }

    // 4) update cluster config in zookeeper
    ZKUtil.updateClusterConfig(zkClient, clusterBean);
    LOG.info("Updated config to {}", ConfigParser.serializeClusterConfig(clusterBean));
    ctx.getTaskQueue().finishTask(ctx.getId(),
            "Successfully added host " + hostToAdd.getIp() + ":" + hostToAdd.getPort());
}
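
Step 2 collects range(0, numShards) into a mutable Set so shard ids already covered in the same availability zone can be removed. That collect-then-subtract pattern in isolation (shard count and removals are made up):

import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

public class ShardSetSketch {
    public static void main(String[] args) {
        int numShards = 8; // made-up shard count
        Set<Integer> shardToServe = IntStream.range(0, numShards).boxed()
                .collect(Collectors.toSet());
        shardToServe.remove(2); // pretend shards 2 and 5 are already served
        shardToServe.remove(5);
        System.out.println(shardToServe); // e.g. [0, 1, 3, 4, 6, 7]
    }
}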

From source file:org.apache.samza.sql.avro.AvroRelConverter.java

/**
 * Converts the nested avro object in SamzaMessage to relational message corresponding to
 * the tableName with relational schema.
 */
@Override
public SamzaSqlRelMessage convertToRelMessage(KV<Object, Object> samzaMessage) {
    List<String> payloadFieldNames = new ArrayList<>();
    List<Object> payloadFieldValues = new ArrayList<>();
    Object value = samzaMessage.getValue();
    if (value instanceof IndexedRecord) {
        fetchFieldNamesAndValuesFromIndexedRecord((IndexedRecord) value, payloadFieldNames, payloadFieldValues,
                payloadSchema);
    } else if (value == null) {
        // If the payload is null, set each record value as null
        payloadFieldNames.addAll(
                payloadSchema.getFields().stream().map(Schema.Field::name).collect(Collectors.toList()));
        IntStream.range(0, payloadFieldNames.size()).forEach(x -> payloadFieldValues.add(null));
    } else {
        String msg = "Avro message converter doesn't support messages of type " + value.getClass();
        LOG.error(msg);
        throw new SamzaException(msg);
    }

    return new SamzaSqlRelMessage(samzaMessage.getKey(), payloadFieldNames, payloadFieldValues,
            new SamzaSqlRelMsgMetadata("", "", ""));
}
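
The null-payload branch uses range purely as a counted loop: it appends one null per field name so the two lists stay the same length. Reduced to its essentials (field names are made up):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.stream.IntStream;

public class NullPaddingSketch {
    public static void main(String[] args) {
        List<String> fieldNames = Arrays.asList("id", "name", "ts"); // made-up schema
        List<Object> fieldValues = new ArrayList<>();
        IntStream.range(0, fieldNames.size()).forEach(x -> fieldValues.add(null));
        System.out.println(fieldValues); // [null, null, null]
    }
}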