List of usage examples for com.google.common.base.Optional.orNull()
@Nullable public abstract T orNull();
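Before the project snippets below, here is a minimal standalone sketch (not taken from any of the listed projects; class and variable names are illustrative) of the orNull() contract: it returns the wrapped reference when the Optional is present and null when it is absent, which is convenient when handing the value to a null-accepting API.

import com.google.common.base.Optional;

public class OrNullSketch {
    public static void main(String[] args) {
        Optional<String> present = Optional.of("value");
        Optional<String> absent = Optional.absent();

        // Present: orNull() unwraps the contained reference.
        System.out.println(present.orNull()); // value

        // Absent: orNull() yields null instead of throwing, unlike get().
        System.out.println(absent.orNull()); // null

        // fromNullable(x).orNull() round-trips x unchanged, null included.
        String maybeNull = null;
        System.out.println(Optional.fromNullable(maybeNull).orNull()); // null
    }
}

The project examples that follow use the same pattern: build an Optional, then call orNull() at the boundary where a plain nullable reference is required.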
From source file:com.on_site.frizzle.debug.LoggingFunction.java
@Override
public Object call(Context cx, Scriptable scope, Scriptable thisObj, Object[] args) {
    Optional<Object> value = null;
    try {
        value = Optional.fromNullable(func.call(cx, scope, thisObj, args));
        return LoggingMixin.instrument(Object.class, name + "()", value);
    } finally {
        String lhs = thisObj == null ? name : thisObj + name;
        if (value == null) {
            LoggingMixin.log("{0}({1}) => abrupt", lhs, printableArgs(args));
        } else {
            LoggingMixin.log("{0}({1}) => {2}", lhs, printableArgs(args), value.orNull());
        }
    }
}
From source file:org.locationtech.geogig.api.porcelain.RemoteRemoveOp.java
/**
 * Executes the remote-remove operation.
 *
 * @return the {@link Remote} that was removed, or {@link Optional#absent()} if the remote
 *         didn't exist.
 */
@Override
protected Remote _call() {
    if (name == null || name.isEmpty()) {
        throw new RemoteException(StatusCode.MISSING_NAME);
    }
    ConfigDatabase config = configDatabase();
    List<String> allRemotes = config.getAllSubsections("remote");
    if (!allRemotes.contains(name)) {
        throw new RemoteException(StatusCode.REMOTE_NOT_FOUND);
    }

    Remote remote = null;
    String remoteSection = "remote." + name;
    Optional<String> remoteFetchURL = config.get(remoteSection + ".url");
    Optional<String> remoteFetch = config.get(remoteSection + ".fetch");
    Optional<String> remotePushURL = Optional.absent();
    Optional<String> remoteMapped = config.get(remoteSection + ".mapped");
    Optional<String> remoteMappedBranch = config.get(remoteSection + ".mappedBranch");
    Optional<String> remoteUserName = config.get(remoteSection + ".username");
    Optional<String> remotePassword = config.get(remoteSection + ".password");
    if (remoteFetchURL.isPresent() && remoteFetch.isPresent()) {
        remotePushURL = config.get(remoteSection + ".pushurl");
    }

    remote = new Remote(name, remoteFetchURL.or(""), remotePushURL.or(remoteFetchURL.or("")),
            remoteFetch.or(""), remoteMapped.or("false").equals("true"), remoteMappedBranch.orNull(),
            remoteUserName.orNull(), remotePassword.orNull());

    config.removeSection(remoteSection);

    // Remove refs
    final ImmutableSet<Ref> localRemoteRefs = command(LsRemote.class).retrieveLocalRefs(true)
            .setRemote(Suppliers.ofInstance(Optional.of(remote))).call();

    for (Ref localRef : localRemoteRefs) {
        command(UpdateRef.class).setDelete(true).setName(localRef.getName()).call();
    }

    return remote;
}
From source file:de.azapps.mirakel.new_ui.fragments.ListsFragment.java
public void setAccount(final Optional<AccountMirakel> accountMirakelOptional) {
    this.accountMirakelOptional = accountMirakelOptional;
    final Bundle args = new Bundle();
    args.putParcelable(ARGUMENT_ACCOUNT, accountMirakelOptional.orNull());
    getLoaderManager().restartLoader(0, args, this);
}
From source file:org.opencms.xml.containerpage.CmsFormatterConfiguration.java
/**
 * Gets the detail formatter to use for the given type and container width.<p>
 *
 * @param types the container types (comma separated)
 * @param containerWidth the container width
 *
 * @return the detail formatter to use
 */
public I_CmsFormatterBean getDetailFormatter(String types, int containerWidth) {
    // detail formatters must still match the type or width
    Predicate<I_CmsFormatterBean> checkValidDetailFormatter = Predicates
            .and(new MatchesTypeOrWidth(types, containerWidth, true), new IsDetail());
    Optional<I_CmsFormatterBean> result = Iterables.tryFind(m_allFormatters, checkValidDetailFormatter);
    return result.orNull();
}
From source file:org.locationtech.geogig.storage.StorageType.java
/**
 * Verifies that the repository is compatible with the provided format name and version for this
 * storage type.
 *
 * @param configDB the config database
 * @param formatName the format name of the storage type
 * @param version the version of the storage format
 * @return {@code true} if the storage type was configured and verified, {@code false} if it was
 *         unset
 * @throws RepositoryConnectionException
 */
public boolean verify(ConfigDatabase configDB, String formatName, String version)
        throws RepositoryConnectionException {
    Optional<String> storageName = configDB.get("storage." + key);
    Optional<String> storageVersion = configDB.get(formatName + ".version");
    boolean unset = !storageName.isPresent();
    boolean valid = storageName.isPresent() && formatName.equals(storageName.get())
            && storageVersion.isPresent() && version.equals(storageVersion.get());
    if (!(unset || valid)) {
        throw new RepositoryConnectionException(
                "Cannot open " + key + " database with format: " + formatName + " and version: " + version
                        + ", found format: " + storageName.orNull() + ", version: "
                        + storageVersion.orNull());
    }
    return !unset;
}
From source file:rapture.audit.AuditLogCache.java
public AuditLog getAuditLog(final CallingContext ctx, final RaptureURI logURI) {
    try {
        Optional<AuditLog> o = auditLogs.get(logURI, new Callable<Optional<AuditLog>>() {
            @Override
            public Optional<AuditLog> call() throws Exception {
                try {
                    Optional<AuditLog> log = Optional.of(createAuditLog(ctx, logURI));
                    return log;
                } catch (RaptureException e) {
                    log.error("Could not load audit log for " + logURI);
                    return Optional.absent();
                }
            }
        });
        return o.orNull();
    } catch (ExecutionException e) {
        log.error("Could not create audit log " + e.getMessage());
        return null;
    }
}
From source file:org.locationtech.geogig.porcelain.RemoteResolve.java
/**
 * Resolves the remote with the configured name, if it exists.
 *
 * @return the resolved {@link Remote}, or {@link Optional#absent()} if no remote with that
 *         name is configured.
 */
@Override
protected Optional<Remote> _call() {
    if (name == null || name.isEmpty()) {
        throw new RemoteException(StatusCode.MISSING_NAME);
    }
    Optional<Remote> result = Optional.absent();

    ConfigDatabase config = configDatabase();
    List<String> allRemotes = config.getAllSubsections("remote");
    if (allRemotes.contains(name)) {
        String remoteSection = "remote." + name;
        Optional<String> remoteFetchURL = config.get(remoteSection + ".url");
        Optional<String> remoteFetch = config.get(remoteSection + ".fetch");
        Optional<String> remoteMapped = config.get(remoteSection + ".mapped");
        Optional<String> remoteMappedBranch = config.get(remoteSection + ".mappedBranch");
        Optional<String> remoteUserName = config.get(remoteSection + ".username");
        Optional<String> remotePassword = config.get(remoteSection + ".password");
        if (remoteFetchURL.isPresent() && remoteFetch.isPresent()) {
            Optional<String> remotePushURL = config.get(remoteSection + ".pushurl");

            Remote remote = new Remote(name, remoteFetchURL.get(), remotePushURL.or(remoteFetchURL.get()),
                    remoteFetch.get(), remoteMapped.or("false").equals("true"), remoteMappedBranch.orNull(),
                    remoteUserName.orNull(), remotePassword.orNull());

            result = Optional.of(remote);
        }
    }
    return result;
}
From source file:org.apache.gobblin.runtime.instance.StandardGobblinInstanceLauncher.java
protected StandardGobblinInstanceLauncher(String name, Configurable instanceConf,
        StandardGobblinInstanceDriver.Builder driverBuilder, Optional<MetricContext> metricContext,
        Optional<Logger> log, SharedResourcesBroker<GobblinScopeTypes> instanceBroker) {
    _log = log.or(LoggerFactory.getLogger(getClass()));
    _name = name;
    _instanceConf = instanceConf;
    _driver = driverBuilder.withInstanceEnvironment(this).build();
    _instrumentationEnabled = metricContext.isPresent();
    _metricContext = metricContext.orNull();
    _instanceBroker = instanceBroker;
}
From source file:google.registry.model.registry.label.PremiumListUtils.java
/**
 * Persists a new or updated PremiumList object and its descendant entities to Datastore.
 *
 * <p>The flow here is: save the new premium list entries parented on that revision entity,
 * save/update the PremiumList, and then delete the old premium list entries associated with the
 * old revision.
 *
 * <p>This is the only valid way to save these kinds of entities!
 */
public static PremiumList savePremiumListAndEntries(final PremiumList premiumList,
        ImmutableMap<String, PremiumListEntry> premiumListEntries) {
    final Optional<PremiumList> oldPremiumList = PremiumList.get(premiumList.getName());

    // Create the new revision (with its bloom filter) and parent the entries on it.
    final PremiumListRevision newRevision = PremiumListRevision.create(premiumList,
            premiumListEntries.keySet());
    final Key<PremiumListRevision> newRevisionKey = Key.create(newRevision);
    ImmutableSet<PremiumListEntry> parentedEntries = parentPremiumListEntriesOnRevision(
            premiumListEntries.values(), newRevisionKey);

    // Save the new child entities in a series of transactions.
    for (final List<PremiumListEntry> batch : partition(parentedEntries, TRANSACTION_BATCH_SIZE)) {
        ofy().transactNew(new VoidWork() {
            @Override
            public void vrun() {
                ofy().save().entities(batch);
            }
        });
    }

    // Save the new PremiumList and revision itself.
    PremiumList updated = ofy().transactNew(new Work<PremiumList>() {
        @Override
        public PremiumList run() {
            DateTime now = ofy().getTransactionTime();
            // Assert that the premium list hasn't been changed since we started this process.
            PremiumList existing = ofy().load().type(PremiumList.class).parent(getCrossTldKey())
                    .id(premiumList.getName()).now();
            checkState(Objects.equals(existing, oldPremiumList.orNull()),
                    "PremiumList was concurrently edited");
            PremiumList newList = premiumList.asBuilder().setLastUpdateTime(now)
                    .setCreationTime(oldPremiumList.isPresent() ? oldPremiumList.get().creationTime : now)
                    .setRevision(newRevisionKey).build();
            ofy().save().entities(newList, newRevision);
            return newList;
        }
    });

    // Update the cache.
    cachePremiumLists.put(premiumList.getName(), updated);

    // Delete the entities under the old PremiumList.
    if (oldPremiumList.isPresent()) {
        deleteRevisionAndEntriesOfPremiumList(oldPremiumList.get());
    }
    return updated;
}
From source file:com.facebook.buck.rules.AbstractCachingBuildRule.java
/**
 * This method is invoked once all of this rule's dependencies are built.
 * <p>
 * This method should be executed on a fresh Runnable in BuildContext's ListeningExecutorService,
 * so there is no reason to schedule new work in a new Runnable.
 * <p>
 * All exit paths through this method should resolve {@link #buildRuleResult} before exiting. To
 * that end, this method should never throw an exception, or else Buck will hang waiting for
 * {@link #buildRuleResult} to be resolved.
 */
private BuildResult buildOnceDepsAreBuilt(final BuildContext context, OnDiskBuildInfo onDiskBuildInfo,
        BuildInfoRecorder buildInfoRecorder) throws IOException {
    // Compute the current RuleKey and compare it to the one stored on disk.
    RuleKey ruleKey = getRuleKey();
    Optional<RuleKey> cachedRuleKey = onDiskBuildInfo.getRuleKey();

    // If the RuleKeys match, then there is nothing to build.
    if (ruleKey.equals(cachedRuleKey.orNull())) {
        context.logBuildInfo("[UNCHANGED %s]", getFullyQualifiedName());
        return new BuildResult(BuildRuleSuccess.Type.MATCHING_RULE_KEY, CacheResult.LOCAL_KEY_UNCHANGED_HIT);
    }

    // Deciding whether we need to rebuild is tricky business. We want to rebuild as little as
    // possible while always being sound.
    //
    // For java_library rules that depend only on their first-order deps,
    // they only need to rebuild themselves if any of the following conditions hold:
    // (1) The definition of the build rule has changed.
    // (2) Any of the input files (which includes resources as well as .java files) have changed.
    // (3) The ABI of any of its dependent java_library rules has changed.
    //
    // For other types of build rules, we have to be more conservative when rebuilding. In those
    // cases, we rebuild if any of the following conditions hold:
    // (1) The definition of the build rule has changed.
    // (2) Any of the input files have changed.
    // (3) Any of the RuleKeys of this rule's deps have changed.
    //
    // Because a RuleKey for a rule will change if any of its transitive deps have changed, that
    // means a change in one of the leaves can result in almost all rules being rebuilt, which is
    // slow. Fortunately, we limit the effects of this when building Java code when checking the
    // ABI of deps instead of the RuleKey for deps.
    if (this instanceof AbiRule) {
        AbiRule abiRule = (AbiRule) this;

        RuleKey ruleKeyNoDeps = getRuleKeyWithoutDeps();
        Optional<RuleKey> cachedRuleKeyNoDeps = onDiskBuildInfo.getRuleKeyWithoutDeps();
        if (ruleKeyNoDeps.equals(cachedRuleKeyNoDeps.orNull())) {
            // The RuleKey for the definition of this build rule and its input files has not changed.
            // Therefore, if the ABI of its deps has not changed, there is nothing to rebuild.
            Sha1HashCode abiKeyForDeps = abiRule.getAbiKeyForDeps();
            Optional<Sha1HashCode> cachedAbiKeyForDeps = onDiskBuildInfo
                    .getHash(AbiRule.ABI_KEY_FOR_DEPS_ON_DISK_METADATA);
            if (abiKeyForDeps.equals(cachedAbiKeyForDeps.orNull())) {
                // Re-copy the ABI metadata.
                // TODO(mbolin): This seems really bad: there could be other metadata to copy, too?
                buildInfoRecorder.addMetadata(AbiRule.ABI_KEY_ON_DISK_METADATA,
                        onDiskBuildInfo.getValue(AbiRule.ABI_KEY_ON_DISK_METADATA).get());
                buildInfoRecorder.addMetadata(AbiRule.ABI_KEY_FOR_DEPS_ON_DISK_METADATA,
                        cachedAbiKeyForDeps.get().getHash());
                return new BuildResult(BuildRuleSuccess.Type.MATCHING_DEPS_ABI_AND_RULE_KEY_NO_DEPS,
                        CacheResult.LOCAL_KEY_UNCHANGED_HIT);
            }
        }
    }

    // Before deciding to build, check the ArtifactCache.
    // The fetched file is now a ZIP file, so it needs to be unzipped.
    CacheResult cacheResult = tryToFetchArtifactFromBuildCacheAndOverlayOnTopOfProjectFilesystem(
            buildInfoRecorder, context.getArtifactCache(), context.getProjectRoot(), context);
    if (cacheResult.isSuccess()) {
        return new BuildResult(BuildRuleSuccess.Type.FETCHED_FROM_CACHE, cacheResult);
    }

    // Run the steps to build this rule since it was not found in the cache.
    // The only remaining option is to build locally.
    try {
        executeCommandsNowThatDepsAreBuilt(context, onDiskBuildInfo, buildInfoRecorder);
    } catch (IOException | StepFailedException e) {
        return new BuildResult(e);
    }

    // Given that the Buildable has built successfully, record that the output file has been
    // written, assuming it has one.
    // TODO(mbolin): Buildable.getSteps() should use BuildableContext such that Buildable is
    // responsible for invoking recordArtifact() itself. Once that is done, this call to
    // recordArtifact() should be deleted.
    String pathToOutputFile = buildable.getPathToOutputFile();
    if (pathToOutputFile != null && pathToOutputFile.startsWith(BuckConstant.GEN_DIR)) {
        String prefix = BuckConstant.GEN_DIR + '/' + getBuildTarget().getBasePathWithSlash();
        Path pathToArtifact = Paths.get(pathToOutputFile.substring(prefix.length()));
        buildInfoRecorder.recordArtifact(pathToArtifact);
    }

    return new BuildResult(BuildRuleSuccess.Type.BUILT_LOCALLY, CacheResult.MISS);
}