Example usage for java.util OptionalInt getAsInt

Introduction

This page lists usage examples for java.util.OptionalInt.getAsInt().

Prototype

public int getAsInt() 

Document

If a value is present, returns the value; otherwise throws NoSuchElementException.
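
Before the project examples, here is a minimal, self-contained sketch of that contract (the class name GetAsIntDemo is invented for this illustration, not taken from any project below):

import java.util.OptionalInt;
import java.util.stream.IntStream;

public class GetAsIntDemo {
    public static void main(String[] args) {
        // max() returns an OptionalInt because the stream may be empty.
        final OptionalInt max = IntStream.of(3, 1, 4).max();
        if (max.isPresent()) {
            System.out.println("max = " + max.getAsInt()); // safe: a value is present
        }

        // Calling getAsInt() on an empty OptionalInt throws NoSuchElementException,
        // so guard with isPresent() or use orElse() for a fallback value.
        final OptionalInt empty = IntStream.empty().max();
        System.out.println("fallback = " + empty.orElse(-1));
    }
}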

Usage

From source file:org.ambraproject.wombat.service.ArticleTransformServiceImpl.java

private void setVersionLink(ArticlePointer articleId, Transformer transformer) {
    final String versionLinkParameter;
    if (articleId.isOriginalRequestVersioned()) {
        final String versionType;
        final int versionNumber;

        OptionalInt revisionNumber = articleId.getRevisionNumber();
        if (revisionNumber.isPresent()) {
            versionType = DoiVersionArgumentResolver.REVISION_PARAMETER;
            versionNumber = revisionNumber.getAsInt();
        } else {
            versionType = DoiVersionArgumentResolver.INGESTION_PARAMETER;
            versionNumber = articleId.getIngestionNumber();
        }

        // Pre-build a snippet of a URL, meant to be concatenated onto a link in an HTML attribute.
        // Assumes that it will always be preceded by at least one other parameter,
        // else we would need a question mark instead of an ampersand.
        // TODO: Build the URL syntax in XSLT instead
        versionLinkParameter = "&" + versionType + "=" + versionNumber;
    } else {
        versionLinkParameter = "";
    }
    transformer.setParameter("versionLinkParameter", versionLinkParameter);
}

From source file:org.apache.nifi.schemaregistry.hortonworks.HortonworksSchemaRegistry.java

private RecordSchema retrieveSchemaByName(final SchemaIdentifier schemaIdentifier)
        throws org.apache.nifi.schema.access.SchemaNotFoundException, IOException {

    final SchemaRegistryClient client = getClient();

    final SchemaVersionInfo versionInfo;
    final Long schemaId;

    final Optional<String> schemaName = schemaIdentifier.getName();
    if (!schemaName.isPresent()) {
        throw new org.apache.nifi.schema.access.SchemaNotFoundException(
                "Cannot retrieve schema because Schema Name is not present");
    }

    final Optional<String> schemaBranchName = schemaIdentifier.getBranch();
    final OptionalInt schemaVersion = schemaIdentifier.getVersion();

    try {
        final SchemaMetadataInfo metadataInfo = client.getSchemaMetadataInfo(schemaName.get());
        if (metadataInfo == null) {
            throw new org.apache.nifi.schema.access.SchemaNotFoundException(
                    "Could not find schema with name '" + schemaName + "'");
        }

        schemaId = metadataInfo.getId();
        if (schemaId == null) {
            throw new org.apache.nifi.schema.access.SchemaNotFoundException(
                    "Could not find schema with name '" + schemaName + "'");
        }

        // possible scenarios are name only, name + branch, or name + version
        if (schemaVersion.isPresent()) {
            final SchemaVersionKey schemaVersionKey = new SchemaVersionKey(schemaName.get(),
                    schemaVersion.getAsInt());
            versionInfo = getSchemaVersionInfo(client, schemaVersionKey);
        } else {
            versionInfo = getLatestSchemaVersionInfo(client, schemaName.get(), schemaBranchName.orElse(null));
        }

        if (versionInfo == null || versionInfo.getVersion() == null) {
            final String message = createErrorMessage("Could not find schema", schemaName, schemaBranchName,
                    schemaVersion);
            throw new org.apache.nifi.schema.access.SchemaNotFoundException(message);
        }

    } catch (final Exception e) {
        final String message = createErrorMessage("Failed to retrieve schema", schemaName, schemaBranchName,
                schemaVersion);
        handleException(message, e);
        return null;
    }

    final String schemaText = versionInfo.getSchemaText();

    final SchemaIdentifier resultSchemaIdentifier = SchemaIdentifier.builder().id(schemaId)
            .name(schemaName.get()).branch(schemaBranchName.orElse(null)).version(versionInfo.getVersion())
            .build();

    final Tuple<SchemaIdentifier, String> tuple = new Tuple<>(resultSchemaIdentifier, schemaText);
    return schemaNameToSchemaMap.computeIfAbsent(tuple, t -> {
        final Schema schema = new Schema.Parser().parse(schemaText);
        return AvroTypeUtil.createSchema(schema, schemaText, resultSchemaIdentifier);
    });
}

From source file:org.apache.nifi.schemaregistry.hortonworks.HortonworksSchemaRegistry.java

private RecordSchema retrieveSchemaByIdAndVersion(final SchemaIdentifier schemaIdentifier)
        throws org.apache.nifi.schema.access.SchemaNotFoundException, IOException {
    final SchemaRegistryClient client = getClient();

    final String schemaName;
    final SchemaVersionInfo versionInfo;

    final OptionalLong schemaId = schemaIdentifier.getIdentifier();
    if (!schemaId.isPresent()) {
        throw new org.apache.nifi.schema.access.SchemaNotFoundException(
                "Cannot retrieve schema because Schema Id is not present");
    }

    final OptionalInt version = schemaIdentifier.getVersion();
    if (!version.isPresent()) {
        throw new org.apache.nifi.schema.access.SchemaNotFoundException(
                "Cannot retrieve schema because Schema Version is not present");
    }

    try {
        final SchemaMetadataInfo info = client.getSchemaMetadataInfo(schemaId.getAsLong());
        if (info == null) {
            throw new org.apache.nifi.schema.access.SchemaNotFoundException(
                    "Could not find schema with ID '" + schemaId + "' and version '" + version + "'");
        }

        final SchemaMetadata metadata = info.getSchemaMetadata();
        schemaName = metadata.getName();

        final SchemaVersionKey schemaVersionKey = new SchemaVersionKey(schemaName, version.getAsInt());
        versionInfo = getSchemaVersionInfo(client, schemaVersionKey);
        if (versionInfo == null) {
            throw new org.apache.nifi.schema.access.SchemaNotFoundException(
                    "Could not find schema with ID '" + schemaId + "' and version '" + version + "'");
        }
    } catch (final Exception e) {
        handleException("Failed to retrieve schema with ID '" + schemaId + "' and version '" + version + "'",
                e);
        return null;
    }

    final String schemaText = versionInfo.getSchemaText();

    final SchemaIdentifier resultSchemaIdentifier = SchemaIdentifier.builder().name(schemaName)
            .id(schemaId.getAsLong()).version(version.getAsInt()).build();

    final Tuple<SchemaIdentifier, String> tuple = new Tuple<>(resultSchemaIdentifier, schemaText);
    return schemaNameToSchemaMap.computeIfAbsent(tuple, t -> {
        final Schema schema = new Schema.Parser().parse(schemaText);
        return AvroTypeUtil.createSchema(schema, schemaText, resultSchemaIdentifier);
    });
}

From source file:org.apache.nifi.schemaregistry.hortonworks.HortonworksSchemaRegistry.java

private String createErrorMessage(final String baseMessage, final Optional<String> schemaName,
        final Optional<String> branchName, final OptionalInt version) {
    final StringBuilder builder = new StringBuilder(baseMessage).append(" with name '")
            .append(schemaName.orElse("null")).append("'");

    if (branchName.isPresent()) {
        builder.append(" and branch '").append(branchName.get()).append("'");
    }

    if (version.isPresent()) {
        builder.append(" and version '").append(version.getAsInt()).append("'");
    }

    return builder.toString();
}

From source file:org.broadinstitute.gatk.tools.walkers.cancer.ClusteredReadPosition.java

/**
 * @param vc the variant context at the candidate site
 * @param pralm the per-read allele likelihood map
 * @return the medians of the left and right offsets and their median absolute deviations,
 *         or Optional.empty() if no usable reads are found; never returns null
 */
private Optional<MedianStatistics> computeReadPositionStats(final VariantContext vc,
        final PerReadAlleleLikelihoodMap pralm) {
    final int variantStartPosition = vc.getStart();
    final List<Integer> tumorLeftOffsets = new ArrayList<>();
    final List<Integer> tumorRightOffsets = new ArrayList<>();
    for (final Map.Entry<GATKSAMRecord, Map<Allele, Double>> readAlleleLikelihood : pralm.getLikelihoodReadMap()
            .entrySet()) {
        final MostLikelyAllele mostLikelyAllele = PerReadAlleleLikelihoodMap
                .getMostLikelyAllele(readAlleleLikelihood.getValue());
        final GATKSAMRecord read = readAlleleLikelihood.getKey();
        if (mostLikelyAllele.getMostLikelyAllele().isReference() || !mostLikelyAllele.isInformative()
                || !isUsableRead(read)) {
            continue;
        }

        final Pair<OptionalInt, OptionalInt> offsetPair = getVariantPositionInRead(read, variantStartPosition);
        final OptionalInt variantPositionInReadFromLeft = offsetPair.getFirst();
        final OptionalInt variantPositionInReadFromRight = offsetPair.getSecond();

        // suffices to check only the left offset because the right offset depends on it
        if (variantPositionInReadFromLeft.isPresent()) {
            tumorLeftOffsets.add(variantPositionInReadFromLeft.getAsInt());
            tumorRightOffsets.add(variantPositionInReadFromRight.getAsInt());
        }
    }

    if (tumorLeftOffsets.isEmpty() || tumorRightOffsets.isEmpty()) {
        // This condition seems to arise when the reads, as aligned in the BAM (as represented by the PRALM), do not contain the alt read found by HaplotypeCaller.
        logger.warn("At Position " + vc.getContig() + ": " + vc.getStart()
                + " , the left or right offset list is empty");
        return Optional.empty();
    }

    // The following (mapToDouble() in particular) causes ClusteredReadPosition not to be added to the ClassMap:
    // leftMedian = median.evaluate(tumorLeftOffsets.stream().mapToDouble(x -> x).toArray());
    // rightMedian = median.evaluate(tumorRightOffsets.stream().mapToDouble(x -> x).toArray());

    // Until we understand why mapToDouble() causes the above error, compute the medians in two steps:
    // first use a for loop to manually cast the integers to doubles, then call Median::evaluate.
    double[] tumorLeftOffsetsDouble = new double[tumorLeftOffsets.size()];
    double[] tumorRightOffsetsDouble = new double[tumorRightOffsets.size()];
    for (int i = 0; i < tumorLeftOffsets.size(); i++) {
        tumorLeftOffsetsDouble[i] = (double) tumorLeftOffsets.get(i);
        tumorRightOffsetsDouble[i] = (double) tumorRightOffsets.get(i);
    }

    Median median = new Median();
    double leftMedian = median.evaluate(tumorLeftOffsetsDouble);
    double rightMedian = median.evaluate(tumorRightOffsetsDouble);
    double leftMAD = calculateMAD(tumorLeftOffsets, leftMedian);
    double rightMAD = calculateMAD(tumorRightOffsets, rightMedian);

    return (Optional.of(new MedianStatistics(leftMedian, rightMedian, leftMAD, rightMAD)));
}

From source file:org.broadinstitute.gatk.tools.walkers.cancer.m2.TumorPowerCalculator.java

private double calculatePower(final int numReads, final double alleleFraction) throws MathException {
    if (numReads == 0)
        return 0;

    // use 1.0 / 3 so the division happens in floating point; the integer expression 1 / 3
    // evaluates to 0 and would silently drop the error term
    final double probAltRead = alleleFraction * (1 - errorProbability)
            + (1.0 / 3) * (1 - alleleFraction) * errorProbability;
    final BinomialDistribution binom = new BinomialDistributionImpl(numReads, probAltRead);
    final double[] binomialProbabilities = IntStream.range(0, numReads + 1).mapToDouble(binom::probability)
            .toArray();

    // find the smallest number of ALT reads k such that tumorLOD(k) > tumorLODThreshold
    final OptionalInt smallestKAboveLogThreshold = IntStream.range(0, numReads + 1)
            .filter(k -> calculateTumorLod(numReads, k) > tumorLODThreshold).findFirst();

    if (!smallestKAboveLogThreshold.isPresent()) {
        return 0;
    }

    if (smallestKAboveLogThreshold.getAsInt() <= 0) {
        throw new IllegalStateException(
                "smallest k that meets the tumor LOD threshold is less than or equal to 0");
    }

    double power = Arrays
            .stream(binomialProbabilities, smallestKAboveLogThreshold.getAsInt(), binomialProbabilities.length)
            .sum();

    // Here we correct for the fact that the exact LOD threshold likely falls somewhere between
    // the k and k-1 bins, so we prorate the power from the k-1 bin.
    if (enableSmoothing) {
        final double tumorLODAtK = calculateTumorLod(numReads, smallestKAboveLogThreshold.getAsInt());
        final double tumorLODAtKMinusOne = calculateTumorLod(numReads,
                smallestKAboveLogThreshold.getAsInt() - 1);
        final double weight = 1
                - (tumorLODThreshold - tumorLODAtKMinusOne) / (tumorLODAtK - tumorLODAtKMinusOne);
        power += weight * binomialProbabilities[smallestKAboveLogThreshold.getAsInt() - 1];
    }

    return (power);
}

From source file:org.jamocha.dn.compiler.pathblocks.PathBlocks.java

protected static List<PathRule> createOutput(final List<Either<Rule, ExistentialProxy>> rules,
        final PathBlockSet resultBlockSet) {
    final Function<? super Block, ? extends Integer> characteristicNumber = block -> block
            .getFlatFilterInstances().size() / block.getRulesOrProxies().size();
    final TreeMap<Integer, CursorableLinkedList<Block>> blockMap = resultBlockSet.getBlocks().stream()
            .collect(groupingBy(characteristicNumber, TreeMap::new, toCollection(CursorableLinkedList::new)));
    // iterate over all the filter proxies ever used
    for (final FilterProxy filterProxy : FilterProxy.getFilterProxies()) {
        final Set<ExistentialProxy> existentialProxies = filterProxy.getProxies();
        // determine the largest characteristic number of the blocks containing filter instances
        // of one of the existential proxies (choice is arbitrary, since the filters and the
        // conflicts are identical if they belong to the same filter).
        final OptionalInt optMax = resultBlockSet.getRuleInstanceToBlocks()
                .computeIfAbsent(Either.right(existentialProxies.iterator().next()), newHashSet()).stream()
                .mapToInt(composeToInt(characteristicNumber, Integer::intValue)).max();
        if (!optMax.isPresent())
            continue;
        final int eCN = optMax.getAsInt();
        // get the list to which to append the blocks that use the existential closure filter instance
        final CursorableLinkedList<Block> targetList = blockMap.get(eCN);
        // for every existential part
        for (final ExistentialProxy existentialProxy : existentialProxies) {
            final FilterInstance exClosure = existentialProxy.getExistentialClosure();
            // create a list storing the blocks to move
            final List<Block> toMove = new ArrayList<>();
            for (final CursorableLinkedList<Block> blockList : blockMap.headMap(eCN, true).values()) {
                // iterate over the blocks in the current list
                for (final ListIterator<Block> iterator = blockList.listIterator(); iterator.hasNext();) {
                    final Block current = iterator.next();
                    // if the current block uses the current existential closure filter
                    // instance, it has to be moved
                    if (current.getFlatFilterInstances().contains(exClosure)) {
                        iterator.remove();
                        toMove.add(current);
                    }
                }
            }
            // append the blocks to be moved (they were only removed so far)
            targetList.addAll(toMove);
        }
    }
    final Set<FilterInstance> constructedFIs = new HashSet<>();
    final Map<Either<Rule, ExistentialProxy>, Map<FilterInstance, Set<FilterInstance>>> ruleToJoinedWith = new HashMap<>();
    final Map<Set<FilterInstance>, PathFilterList> joinedWithToComponent = new HashMap<>();
    // at this point, the network can be constructed
    for (final CursorableLinkedList<Block> blockList : blockMap.values()) {
        for (final Block block : blockList) {
            final List<Either<Rule, ExistentialProxy>> blockRules = Lists
                    .newArrayList(block.getRulesOrProxies());
            final Set<List<FilterInstance>> filterInstanceColumns = Block
                    .getFilterInstanceColumns(block.getFilters(), block.getRuleToFilterToRow(), blockRules);
            // since we are considering blocks, it is either the case that all filter
            // instances of the column have been constructed or none of them have
            final PathSharedListWrapper sharedListWrapper = new PathSharedListWrapper(blockRules.size());
            final Map<Either<Rule, ExistentialProxy>, PathSharedList> ruleToSharedList = IntStream
                    .range(0, blockRules.size()).boxed()
                    .collect(toMap(blockRules::get, sharedListWrapper.getSharedSiblings()::get));
            final List<List<FilterInstance>> columnsToConstruct, columnsAlreadyConstructed;
            {
                final Map<Boolean, List<List<FilterInstance>>> partition = filterInstanceColumns.stream()
                        .collect(partitioningBy(column -> Collections.disjoint(column, constructedFIs)));
                columnsAlreadyConstructed = partition.get(Boolean.FALSE);
                columnsToConstruct = partition.get(Boolean.TRUE);
            }

            if (!columnsAlreadyConstructed.isEmpty()) {
                final Map<PathSharedList, LinkedHashSet<PathFilterList>> sharedPart = new HashMap<>();
                for (final List<FilterInstance> column : columnsAlreadyConstructed) {
                    for (final FilterInstance fi : column) {
                        sharedPart
                                .computeIfAbsent(ruleToSharedList.get(fi.getRuleOrProxy()), newLinkedHashSet())
                                .add(joinedWithToComponent
                                        .get(ruleToJoinedWith.get(fi.getRuleOrProxy()).get(fi)));
                    }
                }
                sharedListWrapper.addSharedColumns(sharedPart);
            }

            for (final List<FilterInstance> column : columnsToConstruct) {
                sharedListWrapper.addSharedColumn(column.stream().collect(
                        toMap(fi -> ruleToSharedList.get(fi.getRuleOrProxy()), FilterInstance::convert)));
            }
            constructedFIs.addAll(block.getFlatFilterInstances());
            for (final Entry<Either<Rule, ExistentialProxy>, Map<Filter, FilterInstancesSideBySide>> entry : block
                    .getRuleToFilterToRow().entrySet()) {
                final Either<Rule, ExistentialProxy> rule = entry.getKey();
                final Set<FilterInstance> joined = entry.getValue().values().stream()
                        .flatMap(sbs -> sbs.getInstances().stream()).collect(toSet());
                final Map<FilterInstance, Set<FilterInstance>> joinedWithMapForThisRule = ruleToJoinedWith
                        .computeIfAbsent(rule, newHashMap());
                joined.forEach(fi -> joinedWithMapForThisRule.put(fi, joined));
                joinedWithToComponent.put(joined, ruleToSharedList.get(rule));
            }
        }
    }
    final List<PathRule> pathRules = new ArrayList<>();
    for (final Either<Rule, ExistentialProxy> either : rules) {
        if (either.isRight()) {
            continue;
        }
        final List<PathFilterList> pathFilterLists = Stream
                .concat(either.left().get().existentialProxies.values().stream().map(p -> Either.right(p)),
                        Stream.of(either))
                .flatMap(e -> ruleToJoinedWith.getOrDefault(e, Collections.emptyMap()).values().stream()
                        .distinct())
                .map(joinedWithToComponent::get).collect(toList());
        pathRules.add(either.left().get().getOriginal().toPathRule(PathFilterList.toSimpleList(pathFilterLists),
                pathFilterLists.size() > 1 ? InitialFactPathsFinder.gather(pathFilterLists)
                        : Collections.emptySet()));
    }
    return pathRules;
}