Example usage for com.google.common.base Optional orNull

Introduction

This page collects example usages of the com.google.common.base Optional.orNull() method, drawn from open source projects.

Prototype

@Nullable
public abstract T orNull();

Document

Returns the contained instance if it is present; null otherwise.
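
Before the real-world examples, here is a minimal, self-contained sketch (not taken from any of the projects below) illustrating the two cases:

import com.google.common.base.Optional;

public class OrNullDemo {
    public static void main(String[] args) {
        Optional<String> present = Optional.of("value");
        Optional<String> absent = Optional.absent();

        // A present Optional is unwrapped to its contained value...
        System.out.println(present.orNull()); // prints "value"
        // ...while an absent Optional yields null, which is useful at
        // boundaries with APIs that expect nullable references.
        System.out.println(absent.orNull()); // prints "null"
    }
}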

Usage

From source file:com.github.jsdossier.JsDoc.java
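
Here orNull() converts a possibly-absent Optional&lt;Annotation&gt; into a nullable value so it can be compared with equals(); an absent annotation becomes null and simply fails the equality check.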

public Optional<Marker> getMarker(Annotation target) {
    for (Marker marker : info.getMarkers()) {
        Optional<Annotation> annotation = Annotation.forMarker(marker);
        if (target.equals(annotation.orNull())) {
            return Optional.of(marker);
        }
    }
    return Optional.absent();
}

From source file:google.registry.flows.host.HostUpdateFlow.java
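
This EPP flow wraps a possibly-null superordinate domain in an Optional via fromNullable(), then uses orNull() to hand it to verifyUpdateAllowed(...) as a nullable argument.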

@Override
public final EppResponse run() throws EppException {
    extensionManager.register(MetadataExtension.class);
    extensionManager.validate();
    validateClientIsLoggedIn(clientId);
    Update command = (Update) resourceCommand;
    Change change = command.getInnerChange();
    String suppliedNewHostName = change.getFullyQualifiedHostName();
    DateTime now = ofy().getTransactionTime();
    // Validation is disabled for superusers to allow renaming of existing invalid hostnames.
    // TODO(b/32328995): Remove superuser override once all bad data in prod has been fixed.
    if (!isSuperuser) {
        validateHostName(targetId);
    }
    HostResource existingHost = loadAndVerifyExistence(HostResource.class, targetId, now);
    boolean isHostRename = suppliedNewHostName != null;
    String oldHostName = targetId;
    String newHostName = firstNonNull(suppliedNewHostName, oldHostName);
    Optional<DomainResource> superordinateDomain = Optional
            .fromNullable(lookupSuperordinateDomain(validateHostName(newHostName), now));
    verifyUpdateAllowed(command, existingHost, superordinateDomain.orNull());
    if (isHostRename && loadAndGetKey(HostResource.class, newHostName, now) != null) {
        throw new HostAlreadyExistsException(newHostName);
    }
    AddRemove add = command.getInnerAdd();
    AddRemove remove = command.getInnerRemove();
    checkSameValuesNotAddedAndRemoved(add.getStatusValues(), remove.getStatusValues());
    checkSameValuesNotAddedAndRemoved(add.getInetAddresses(), remove.getInetAddresses());
    HostResource newHost = existingHost.asBuilder().setFullyQualifiedHostName(newHostName)
            .addStatusValues(add.getStatusValues()).removeStatusValues(remove.getStatusValues())
            .addInetAddresses(add.getInetAddresses()).removeInetAddresses(remove.getInetAddresses())
            .setLastEppUpdateTime(now).setLastEppUpdateClientId(clientId)
            // The superordinateDomain can be missing if the new name is external.
            // Note that the value of superordinateDomain is projected to the current time inside of
            // the lookupSuperordinateDomain(...) call above, so that it will never be stale.
            .setSuperordinateDomain(
                    superordinateDomain.isPresent() ? Key.create(superordinateDomain.get()) : null)
            .setLastSuperordinateChange(superordinateDomain == null ? null : now).build()
            // Rely on the host's cloneProjectedAtTime() method to handle setting of transfer data.
            .cloneProjectedAtTime(now);
    verifyHasIpsIffIsExternal(command, existingHost, newHost);
    ImmutableSet.Builder<ImmutableObject> entitiesToSave = new ImmutableSet.Builder<>();
    entitiesToSave.add(newHost);
    // Keep the {@link ForeignKeyIndex} for this host up to date.
    if (isHostRename) {
        // Update the foreign key for the old host name and save one for the new host name.
        entitiesToSave.add(ForeignKeyIndex.create(existingHost, now),
                ForeignKeyIndex.create(newHost, newHost.getDeletionTime()));
        updateSuperordinateDomains(existingHost, newHost);
    }
    enqueueTasks(existingHost, newHost);
    entitiesToSave.add(historyBuilder.setType(HistoryEntry.Type.HOST_UPDATE).setModificationTime(now)
            .setParent(Key.create(existingHost)).build());
    ofy().save().entities(entitiesToSave.build());
    return responseBuilder.build();
}

From source file:org.geogit.web.api.commands.BlameWebOp.java
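
The commit resolved from the ref spec may be absent; orNull() passes it to BlameOp as a nullable ObjectId, so the operation can fall back to its default behavior when no commit was supplied.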

/**
 * Runs the command and builds the appropriate response.
 *
 * @param context - the context to use for this command
 */
@Override
public void run(CommandContext context) {
    final Context geogit = this.getCommandLocator(context);

    Optional<ObjectId> commit = Optional.absent();
    if (branchOrCommit != null) {
        commit = geogit.command(RevParse.class).setRefSpec(branchOrCommit).call();
        if (!commit.isPresent()) {
            throw new CommandSpecException("Could not resolve branch or commit");
        }
    }

    try {
        final BlameReport report = geogit.command(BlameOp.class).setPath(path).setCommit(commit.orNull())
                .call();

        context.setResponseContent(new CommandResponse() {
            @Override
            public void write(ResponseWriter out) throws Exception {
                out.start();
                try {
                    out.writeBlameReport(report);
                } catch (XMLStreamException e) {
                    throw new CommandSpecException("Error writing stream.");
                }
                out.finish();
            }
        });
    } catch (BlameException e) {
        switch (e.statusCode) {
        case PATH_NOT_FEATURE:
            throw new CommandSpecException("The supplied path does not resolve to a feature");
        case FEATURE_NOT_FOUND:
            throw new CommandSpecException("The supplied path does not exist");
        }
    }
}

From source file:org.opendaylight.distributed.tx.impl.CachingReadWriteTx.java
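
Before submitting an asynchronous put, the transaction reads the current value and caches it; orNull() stores the read result as a nullable payload in the CachedData entry.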

public <T extends DataObject> CheckedFuture<Void, DTxException> asyncPut(
        final LogicalDatastoreType logicalDatastoreType, final InstanceIdentifier<T> instanceIdentifier,
        final T t) {
    increaseOperation();
    final SettableFuture<Void> retFuture = SettableFuture.create();

    CheckedFuture<Optional<T>, ReadFailedException> readFuture = null;
    try {
        readFuture = delegate.read(logicalDatastoreType, instanceIdentifier);
    } catch (Exception e) {
        readFuture = Futures
                .immediateFailedCheckedFuture(new ReadFailedException("Read exception in put action"));
    }

    Futures.addCallback(readFuture, new FutureCallback<Optional<T>>() {

        @Override
        public void onSuccess(final Optional<T> result) {
            synchronized (this) {
                cache.add(new CachedData(logicalDatastoreType, instanceIdentifier, result.orNull(),
                        ModifyAction.REPLACE));
            }

            final ListeningExecutorService executorService = MoreExecutors
                    .listeningDecorator(executorPoolPerCache);
            final ListenableFuture asyncPutFuture = executorService.submit(new Callable() {
                @Override
                public Object call() throws Exception {
                    delegate.put(logicalDatastoreType, instanceIdentifier, t);
                    return null;
                }
            });

            Futures.addCallback(asyncPutFuture, new FutureCallback() {
                @Override
                public void onSuccess(@Nullable Object result) {
                    decreaseOperation();
                    retFuture.set(null);
                }

                @Override
                public void onFailure(Throwable t) {
                    decreaseOperation();
                    LOG.trace("async put failure");
                    retFuture.setException(new DTxException.EditFailedException("async put failure", t));
                }
            });
        }

        @Override
        public void onFailure(final Throwable t) {
            decreaseOperation();
            retFuture.setException(
                    new DTxException.ReadFailedException("failed to read from node in put action", t));
        }
    });

    return Futures.makeChecked(retFuture, new Function<Exception, DTxException>() {
        @Nullable
        @Override
        public DTxException apply(@Nullable Exception e) {
            e = (Exception) e.getCause();
            return e instanceof DTxException ? (DTxException) e : new DTxException("put operation failed", e);
        }
    });
}

From source file:org.locationtech.geogig.web.api.commands.BlameWebOp.java
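
GeoGit was later renamed GeoGig and moved to LocationTech; this is the corresponding example from the renamed project, using orNull() the same way to pass an optional commit to BlameOp.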

/**
 * Runs the command and builds the appropriate response.
 *
 * @param context - the context to use for this command
 */
@Override
public void run(CommandContext context) {
    final Context geogig = this.getCommandLocator(context);

    Optional<ObjectId> commit = Optional.absent();
    if (branchOrCommit != null) {
        commit = geogig.command(RevParse.class).setRefSpec(branchOrCommit).call();
        if (!commit.isPresent()) {
            throw new CommandSpecException("Could not resolve branch or commit");
        }
    }

    try {
        final BlameReport report = geogig.command(BlameOp.class).setPath(path).setCommit(commit.orNull())
                .call();

        context.setResponseContent(new CommandResponse() {
            @Override
            public void write(ResponseWriter out) throws Exception {
                out.start();
                try {
                    out.writeBlameReport(report);
                } catch (XMLStreamException e) {
                    throw new CommandSpecException("Error writing stream.");
                }
                out.finish();
            }
        });
    } catch (BlameException e) {
        switch (e.statusCode) {
        case PATH_NOT_FEATURE:
            throw new CommandSpecException("The supplied path does not resolve to a feature");
        case FEATURE_NOT_FOUND:
            throw new CommandSpecException("The supplied path does not exist");
        }
    }
}

From source file:org.opendaylight.distributed.tx.impl.CachingReadWriteTx.java
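
The delete counterpart of the asyncPut example above: the value about to be deleted is read and cached via orNull() before the asynchronous delete is submitted.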

public CheckedFuture<Void, DTxException> asyncDelete(final LogicalDatastoreType logicalDatastoreType,
        final InstanceIdentifier<?> instanceIdentifier) {
    increaseOperation();
    CheckedFuture<Optional<DataObject>, ReadFailedException> readFuture = null;
    try {
        readFuture = delegate.read(logicalDatastoreType, (InstanceIdentifier<DataObject>) instanceIdentifier);
    } catch (Exception e) {
        readFuture = Futures
                .immediateFailedCheckedFuture(new ReadFailedException("Read exception in delete action"));
    }

    final SettableFuture<Void> retFuture = SettableFuture.create();

    Futures.addCallback(readFuture, new FutureCallback<Optional<DataObject>>() {
        @Override
        public void onSuccess(final Optional<DataObject> result) {
            synchronized (this) {
                cache.add(new CachedData(logicalDatastoreType, instanceIdentifier, result.orNull(),
                        ModifyAction.DELETE));
            }

            final ListeningExecutorService executorService = MoreExecutors
                    .listeningDecorator(executorPoolPerCache);
            final ListenableFuture asyncDeleteFuture = executorService.submit(new Callable() {
                @Override
                public Object call() throws Exception {
                    delegate.delete(logicalDatastoreType, instanceIdentifier);
                    return null;
                }
            });

            Futures.addCallback(asyncDeleteFuture, new FutureCallback() {
                @Override
                public void onSuccess(@Nullable Object result) {
                    decreaseOperation();
                    retFuture.set(null);
                }

                @Override
                public void onFailure(Throwable t) {
                    decreaseOperation();
                    LOG.trace("async delete failure");
                    retFuture.setException(new DTxException.EditFailedException("async delete failure", t));
                }
            });
        }

        @Override
        public void onFailure(final Throwable t) {
            decreaseOperation();
            retFuture.setException(
                    new DTxException.ReadFailedException("failed to read from node in delete action", t));
        }
    });

    return Futures.makeChecked(retFuture, new Function<Exception, DTxException>() {
        @Nullable
        @Override
        public DTxException apply(@Nullable Exception e) {
            e = (Exception) e.getCause();
            return e instanceof DTxException ? (DTxException) e
                    : new DTxException("delete operation failed ", e);
        }
    });
}

From source file:com.vityuk.ginger.provider.DefaultLocalizationProvider.java
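
A cache lookup yields an Optional&lt;MessageFormat&gt;; orNull() unwraps it so callers receive null when no message format exists for the key.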

private MessageFormat getMessageFormat(Locale locale, String key, String selector) {
    try {
        MessageKey messageKey = new MessageKey(locale, key, selector);
        Optional<MessageFormat> messageFormatOptional = messageFormatCache.getUnchecked(messageKey);
        return messageFormatOptional.orNull();
    } catch (UncheckedExecutionException e) {
        throw Throwables.propagate(e.getCause());
    }
}

From source file:org.opendaylight.yangtools.yang.data.impl.schema.tree.ChoiceModificationStrategy.java
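
Here orNull() supplies the offending child node to the precondition's error message; the message is only rendered when the check fails, i.e. when the child is actually present.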

private void enforceCases(final NormalizedNode<?, ?> normalizedNode) {
    Verify.verify(normalizedNode instanceof ChoiceNode);
    final Collection<DataContainerChild<?, ?>> children = ((ChoiceNode) normalizedNode).getValue();
    if (!children.isEmpty()) {
        final DataContainerChild<?, ?> firstChild = children.iterator().next();
        final CaseEnforcer enforcer = caseEnforcers.get(firstChild.getIdentifier());
        Verify.verifyNotNull(enforcer,
                "Case enforcer cannot be null. Most probably, child node %s of choice node %s does not belong in current tree type.",
                firstChild.getIdentifier(), normalizedNode.getIdentifier());

        // Make sure no leaves from other cases are present
        for (final CaseEnforcer other : exclusions.get(enforcer)) {
            for (final PathArgument id : other.getAllChildIdentifiers()) {
                final Optional<NormalizedNode<?, ?>> maybeChild = NormalizedNodes.getDirectChild(normalizedNode,
                        id);
                Preconditions.checkArgument(!maybeChild.isPresent(),
                        "Child %s (from case %s) implies non-presence of child %s (from case %s), which is %s",
                        firstChild.getIdentifier(), enforcer, id, other, maybeChild.orNull());
            }
        }

        // Make sure all mandatory children are present
        enforcer.enforceOnTreeNode(normalizedNode);
    }
}

From source file:google.registry.batch.MapreduceEntityCleanupUtil.java
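
An Optional&lt;String&gt; cursor is converted with orNull() into the nullable cursor string passed to PipelineManager.queryRootPipelines(...).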

/**
 * Finds the requested number of root pipeline jobs eligible for deletion.
 *
 * <p>Loops through the root jobs returned by the pipeline API, searching for those with a
 * matching name in an appropriate state, and older than the specified cutoff date.
 *
 * <p>Regardless of the setting of maxJobs, a maximum of {@link
 * #MAX_NUMBER_OF_JOBS_PER_SEARCH} will be returned. If there might be more jobs available to
 * find, a cursor will be returned, which can be used in a subsequent call to {@link
 * #findEligibleJobsByJobName} to continue the search.
 *
 * @param jobName the desired job name; if null, all jobs are considered to match
 * @param cutoffDate eligible jobs must have both startTime and endTime before cutoffDate; if
 *     startTime and/or endTime are null, they are considered to be old enough -- this is because
 *     many jobs do lack at least one of these, and we don't want such jobs to stick around
 *     forever and not get deleted
 * @param maxJobs the maximum number of jobs to return; if absent, return all eligible jobs (see
 *     note above about {@link #MAX_NUMBER_OF_JOBS_PER_SEARCH})
 * @param ignoreState if true, jobs will be included regardless of the state
 * @param cursor if present, a cursor returned from a previous call to the method; the search will
 *     be picked up where it left off
 * @return job IDs of the eligible jobs
 */
EligibleJobResults findEligibleJobsByJobName(@Nullable String jobName, DateTime cutoffDate,
        Optional<Integer> maxJobs, boolean ignoreState, Optional<String> cursor) {
    if (maxJobs.isPresent() && (maxJobs.get() <= 0)) {
        return EligibleJobResults.create(ImmutableSet.<String>of(), Optional.<String>absent());
    }
    Set<String> eligibleJobs = new HashSet<>();
    Pair<? extends Iterable<JobRecord>, String> pair = PipelineManager.queryRootPipelines(jobName,
            cursor.orNull(), getMaxNumberOfJobsPerSearch());
    for (JobRecord jobRecord : pair.getFirst()) {
        if (((jobRecord.getStartTime() == null) || jobRecord.getStartTime().before(cutoffDate.toDate()))
                && ((jobRecord.getEndTime() == null) || jobRecord.getEndTime().before(cutoffDate.toDate()))
                && (ignoreState || (jobRecord.getState() == JobRecord.State.FINALIZED)
                        || (jobRecord.getState() == JobRecord.State.STOPPED))) {
            eligibleJobs.add(jobRecord.getRootJobKey().getName());
            if (maxJobs.isPresent() && (eligibleJobs.size() >= maxJobs.get())) {
                return EligibleJobResults.create(ImmutableSet.copyOf(eligibleJobs), Optional.<String>absent());
            }
        }
    }
    return EligibleJobResults.create(ImmutableSet.copyOf(eligibleJobs),
            Optional.fromNullable(pair.getSecond()));
}

From source file:com.arpnetworking.tsdcore.statistics.MeanStatistic.java
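
The unit accumulated across aggregations is kept as an Optional&lt;Unit&gt;; orNull() unwraps it when building the Quantity, whose builder accepts a nullable unit.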

/**
 * {@inheritDoc}
 */
@Override
public Quantity calculateAggregations(final List<AggregatedData> aggregations) {
    double weighted = 0D;
    int count = 0;
    Optional<Unit> unit = Optional.absent();
    for (final AggregatedData aggregation : aggregations) {
        final double populationSize = aggregation.getPopulationSize();
        weighted += aggregation.getValue().getValue() * populationSize;
        count += populationSize;
        unit = unit.or(aggregation.getValue().getUnit());
    }
    return new Quantity.Builder().setValue(weighted / count).setUnit(unit.orNull()).build();
}