Example usage for com.google.common.base Optional transform

List of usage examples for com.google.common.base Optional transform

Introduction

On this page you can find usage examples for com.google.common.base Optional transform.

Prototype

public abstract <V> Optional<V> transform(Function<? super T, V> function);

Document

If the instance is present, it is transformed with the given Function; otherwise, Optional#absent is returned.
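
Before the real-world usages, here is a minimal sketch (not taken from any of the projects below; the names are made up for illustration) of the pattern that recurs throughout them: transform maps the wrapped value when it is present, and a chained or(...) or orNull() supplies the fallback when it is absent.

import com.google.common.base.Optional;

public class OptionalTransformSketch {

    // transform applies the mapping function only when a value is present;
    // or(...) supplies the value returned when the Optional is absent.
    static String greeting(Optional<String> maybeName) {
        return maybeName.transform(name -> "Hello, " + name + "!").or("Hello, stranger!");
    }

    public static void main(String[] args) {
        System.out.println(greeting(Optional.of("Ada")));            // Hello, Ada!
        System.out.println(greeting(Optional.<String>absent()));     // Hello, stranger!
    }
}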

Usage

From source file:org.apache.aurora.scheduler.thrift.ReadOnlySchedulerImpl.java

@Override
public Response getJobSummary(@Nullable String maybeNullRole) {
    Optional<String> ownerRole = Optional.fromNullable(maybeNullRole);

    Multimap<IJobKey, IScheduledTask> tasks = getTasks(maybeRoleScoped(ownerRole));
    Map<IJobKey, IJobConfiguration> jobs = getJobs(ownerRole, tasks);

    Function<IJobKey, JobSummary> makeJobSummary = jobKey -> {
        IJobConfiguration job = jobs.get(jobKey);
        JobSummary summary = new JobSummary().setJob(job.newBuilder())
                .setStats(Jobs.getJobStats(tasks.get(jobKey)).newBuilder());

        if (job.isSetCronSchedule()) {
            CrontabEntry crontabEntry = CrontabEntry.parse(job.getCronSchedule());
            Optional<Date> nextRun = cronPredictor.predictNextRun(crontabEntry);
            return nextRun.transform(date -> summary.setNextCronRunMs(date.getTime())).or(summary);
        } else {
            return summary;
        }
    };

    ImmutableSet<JobSummary> jobSummaries = FluentIterable.from(jobs.keySet()).transform(makeJobSummary)
            .toSet();

    return ok(Result.jobSummaryResult(new JobSummaryResult().setSummaries(jobSummaries)));
}

From source file:com.twitter.aurora.scheduler.TaskVars.java

@Subscribe
public void taskChangedState(TaskStateChange stateChange) {
    if (!storageStarted) {
        return;
    }

    IScheduledTask task = stateChange.getTask();
    if (stateChange.getOldState() != ScheduleStatus.INIT) {
        decrementCount(stateChange.getOldState());
    }
    incrementCount(task.getStatus());

    if (stateChange.getNewState() == ScheduleStatus.LOST) {
        final String host = stateChange.getTask().getAssignedTask().getSlaveHost();
        Optional<String> rack = storage.consistentRead(new Work.Quiet<Optional<String>>() {
            @Override
            public Optional<String> apply(StoreProvider storeProvider) {
                Optional<Attribute> rack = FluentIterable
                        .from(AttributeStore.Util.attributesOrNone(storeProvider, host)).firstMatch(IS_RACK);
                return rack.transform(ATTR_VALUE);
            }
        });

        if (rack.isPresent()) {
            countersByRack.getUnchecked(rack.get()).incrementAndGet();
        } else {
            LOG.warning("Failed to find rack attribute associated with host " + host);
        }
    }
}

From source file:com.qcadoo.mes.productionPerShift.hooks.ProductionPerShiftDetailsHooks.java

private void fillOperationProductLookup(final ViewDefinitionState view,
        final Optional<Entity> maybeMainOperationProduct) {
    LookupComponent producesField = (LookupComponent) view.getComponentByReference(PRODUCED_PRODUCT_LOOKUP_REF);
    producesField.setFieldValue(maybeMainOperationProduct.transform(EntityUtils.getIdExtractor()).orNull());
    producesField.requestComponentUpdateState();
}

From source file:springfox.documentation.swagger.schema.ApiModelPropertyPropertyBuilder.java

@Override
public void apply(ModelPropertyContext context) {
    Optional<ApiModelProperty> annotation = Optional.absent();

    if (context.getAnnotatedElement().isPresent()) {
        annotation = annotation.or(findApiModePropertyAnnotation(context.getAnnotatedElement().get()));
    }
    if (context.getBeanPropertyDefinition().isPresent()) {
        annotation = annotation
                .or(findPropertyAnnotation(context.getBeanPropertyDefinition().get(), ApiModelProperty.class));
    }
    if (annotation.isPresent()) {
        context.getBuilder().allowableValues(annotation.transform(toAllowableValues()).orNull())
                .required(annotation.transform(toIsRequired()).or(false))
                .readOnly(annotation.transform(toIsReadOnly()).or(false))
                .description(annotation.transform(toDescription()).orNull())
                .isHidden(annotation.transform(toHidden()).or(false))
                .type(annotation.transform(toType(context.getResolver())).orNull())
                .position(annotation.transform(toPosition()).or(0))
                .example(annotation.transform(toExample()).orNull());
    }
}

From source file:com.qcadoo.mes.productionPerShift.hooks.ProductionPerShiftDetailsHooks.java

private void fillProgressForDays(final AwesomeDynamicListComponent progressForDaysADL,
        final Optional<Entity> maybeTechnologyOperation, final Optional<Entity> maybeMainOperationProduct,
        final ProgressType progressType, final OrderState orderState) {
    List<Entity> progresses = maybeTechnologyOperation.transform(new Function<Entity, List<Entity>>() {

        @Override
        public List<Entity> apply(final Entity technologyOperation) {
            return progressForDayDataProvider.findForOperation(technologyOperation,
                    progressType == ProgressType.CORRECTED);
        }
    }).or(Collections.<Entity>emptyList());
    progressForDaysADL.setFieldValue(progresses);
    progressForDaysADL.requestComponentUpdateState();
}

From source file:com.eucalyptus.auth.euare.identity.region.RegionConfigurationManager.java

private boolean isValidAddress(final InetAddress inetAddress,
        final NonNullFunction<RegionInfo, Set<Cidr>> cidrTransform) {
    final Optional<RegionInfo> regionInfoOptional = getRegionInfo();
    final Predicate<InetAddress> addressPredicate = Predicates
            .or(Iterables.concat(regionInfoOptional.transform(cidrTransform).asSet()));
    return addressPredicate.apply(inetAddress);
}

From source file:com.facebook.buck.parser.ParallelDaemonicParserState.java

/**
 * Finds the build file responsible for the given {@link Path} and invalidates
 * all of the cached rules dependent on it.
 * @param path A {@link Path}, relative to the project root and "contained"
 *             within the build file to find and invalidate.
 */
private synchronized void invalidateContainingBuildFile(Cell cell, BuildFileTree buildFiles, Path path) {
    Set<Path> packageBuildFiles = new HashSet<>();

    // Find the closest ancestor package for the input path.  We'll definitely need to invalidate
    // that.
    Optional<Path> packageBuildFile = buildFiles.getBasePathOfAncestorTarget(path);
    packageBuildFiles.addAll(packageBuildFile.transform(cell.getFilesystem().getAbsolutifier()).asSet());

    // If we're *not* enforcing package boundary checks, it's possible for multiple ancestor
    // packages to reference the same file
    if (!cell.isEnforcingBuckPackageBoundaries()) {
        while (packageBuildFile.isPresent() && packageBuildFile.get().getParent() != null) {
            packageBuildFile = buildFiles.getBasePathOfAncestorTarget(packageBuildFile.get().getParent());
            packageBuildFiles.addAll(packageBuildFile.asSet());
        }
    }

    // Invalidate all the packages we found.
    for (Path buildFile : packageBuildFiles) {
        invalidatePath(cell, buildFile.resolve(cell.getBuildFileName()));
    }
}

From source file:org.apache.aurora.scheduler.filter.ConstraintFilter.java

/**
 * Gets the veto (if any) for a scheduling constraint based on the {@link AttributeAggregate} this
 * filter was created with.
 *
 * @param constraint Scheduling filter to check.
 * @return A veto if the constraint is not satisfied based on the existing state of the job.
 */
Optional<Veto> getVeto(IConstraint constraint) {
    Iterable<IAttribute> sameNameAttributes = Iterables.filter(hostAttributes,
            new NameFilter(constraint.getName()));
    Optional<IAttribute> attribute;
    if (Iterables.isEmpty(sameNameAttributes)) {
        attribute = Optional.absent();
    } else {
        Set<String> attributeValues = ImmutableSet
                .copyOf(Iterables.concat(Iterables.transform(sameNameAttributes, GET_VALUES)));
        attribute = Optional.of(IAttribute.build(new Attribute(constraint.getName(), attributeValues)));
    }

    ITaskConstraint taskConstraint = constraint.getConstraint();
    switch (taskConstraint.getSetField()) {
    case VALUE:
        boolean matches = AttributeFilter.matches(attribute.transform(GET_VALUES).or(ImmutableSet.<String>of()),
                taskConstraint.getValue());
        return matches ? Optional.<Veto>absent() : Optional.of(mismatchVeto(constraint.getName()));

    case LIMIT:
        if (!attribute.isPresent()) {
            return Optional.of(mismatchVeto(constraint.getName()));
        }

        boolean satisfied = AttributeFilter.matches(attribute.get(), taskConstraint.getLimit().getLimit(),
                cachedjobState);
        return satisfied ? Optional.<Veto>absent() : Optional.of(limitVeto(constraint.getName()));

    default:
        throw new SchedulerException(
                "Failed to recognize the constraint type: " + taskConstraint.getSetField());
    }
}

From source file:org.apache.aurora.scheduler.state.TaskStateMachine.java

private TaskStateMachine(final String name, final Optional<IScheduledTask> task) {
    MorePreconditions.checkNotBlank(name);
    requireNonNull(task);

    final TaskState initialState = task.transform(SCHEDULED_TO_TASK_STATE).or(DELETED);
    if (task.isPresent()) {
        Preconditions.checkState(initialState != DELETED, "A task that exists may not be in DELETED state.");
    } else {
        Preconditions.checkState(initialState == DELETED,
                "A task that does not exist must start in DELETED state.");
    }

    Consumer<Transition<TaskState>> manageTerminatedTasks = Consumers
            .combine(ImmutableList.<Consumer<Transition<TaskState>>>builder()
                    // Kill a task that we believe to be terminated when an attempt is made to revive.
                    .add(Consumers.filter(Transition.to(ASSIGNED, STARTING, RUNNING), addFollowupClosure(KILL)))
                    // Remove a terminated task that is requested to be deleted.
                    .add(Consumers.filter(Transition.to(DELETED), addFollowupClosure(DELETE))).build());

    final Consumer<Transition<TaskState>> manageRestartingTask = transition -> {
        switch (transition.getTo()) {
        case ASSIGNED:
            addFollowup(KILL);
            break;

        case STARTING:
            addFollowup(KILL);
            break;

        case RUNNING:
            addFollowup(KILL);
            break;

        case LOST:
            addFollowup(KILL);
            addFollowup(RESCHEDULE);
            break;

        case FINISHED:
            addFollowup(RESCHEDULE);
            break;

        case FAILED:
            addFollowup(RESCHEDULE);
            break;

        case KILLED:
            addFollowup(RESCHEDULE);
            break;

        default:
            // No-op.
        }
    };

    // To be called on a task transitioning into the FINISHED state.
    final Command rescheduleIfService = () -> {
        if (task.get().getAssignedTask().getTask().isIsService()) {
            addFollowup(RESCHEDULE);
        }
    };

    // To be called on a task transitioning into the FAILED state.
    final Command incrementFailuresMaybeReschedule = new Command() {
        @Override
        public void execute() {
            addFollowup(INCREMENT_FAILURES);

            // Max failures is ignored for service task.
            boolean isService = task.get().getAssignedTask().getTask().isIsService();

            // Max failures is ignored when set to -1.
            int maxFailures = task.get().getAssignedTask().getTask().getMaxTaskFailures();
            boolean belowMaxFailures = maxFailures == -1 || task.get().getFailureCount() < (maxFailures - 1);
            if (isService || belowMaxFailures) {
                addFollowup(RESCHEDULE);
            } else {
                LOG.info("Task " + name + " reached failure limit, not rescheduling");
            }
        }
    };

    final Consumer<Transition<TaskState>> deleteIfKilling = Consumers.filter(Transition.to(KILLING),
            addFollowupClosure(DELETE));

    stateMachine = StateMachine.<TaskState>builder(name).logTransitions().initialState(initialState)
            .addState(Rule.from(INIT).to(PENDING, THROTTLED))
            .addState(Rule.from(PENDING).to(ASSIGNED, KILLING).withCallback(deleteIfKilling))
            .addState(Rule.from(THROTTLED).to(PENDING, KILLING).withCallback(deleteIfKilling))
            .addState(Rule.from(ASSIGNED).to(STARTING, RUNNING, FINISHED, FAILED, RESTARTING, DRAINING, KILLED,
                    KILLING, LOST, PREEMPTING).withCallback(transition -> {
                        switch (transition.getTo()) {
                        case FINISHED:
                            rescheduleIfService.execute();
                            break;

                        case PREEMPTING:
                            addFollowup(KILL);
                            break;

                        case FAILED:
                            incrementFailuresMaybeReschedule.execute();
                            break;

                        case RESTARTING:
                            addFollowup(KILL);
                            break;

                        case DRAINING:
                            addFollowup(KILL);
                            break;

                        case KILLED:
                            addFollowup(RESCHEDULE);
                            break;

                        case LOST:
                            addFollowup(RESCHEDULE);
                            addFollowup(KILL);
                            break;

                        case KILLING:
                            addFollowup(KILL);
                            break;

                        default:
                            // No-op.
                        }
                    }))
            .addState(Rule.from(STARTING)
                    .to(RUNNING, FINISHED, FAILED, RESTARTING, DRAINING, KILLING, KILLED, LOST, PREEMPTING)
                    .withCallback(transition -> {
                        switch (transition.getTo()) {
                        case FINISHED:
                            rescheduleIfService.execute();
                            break;

                        case RESTARTING:
                            addFollowup(KILL);
                            break;

                        case DRAINING:
                            addFollowup(KILL);
                            break;

                        case PREEMPTING:
                            addFollowup(KILL);
                            break;

                        case FAILED:
                            incrementFailuresMaybeReschedule.execute();
                            break;

                        case KILLED:
                            addFollowup(RESCHEDULE);
                            break;

                        case KILLING:
                            addFollowup(KILL);
                            break;

                        case LOST:
                            addFollowup(RESCHEDULE);
                            break;

                        default:
                            // No-op.
                        }
                    }))
            .addState(Rule.from(RUNNING)
                    .to(FINISHED, RESTARTING, DRAINING, FAILED, KILLING, KILLED, LOST, PREEMPTING)
                    .withCallback(transition -> {
                        switch (transition.getTo()) {
                        case FINISHED:
                            rescheduleIfService.execute();
                            break;

                        case PREEMPTING:
                            addFollowup(KILL);
                            break;

                        case RESTARTING:
                            addFollowup(KILL);
                            break;

                        case DRAINING:
                            addFollowup(KILL);
                            break;

                        case FAILED:
                            incrementFailuresMaybeReschedule.execute();
                            break;

                        case KILLED:
                            addFollowup(RESCHEDULE);
                            break;

                        case KILLING:
                            addFollowup(KILL);
                            break;

                        case LOST:
                            addFollowup(RESCHEDULE);
                            break;

                        default:
                            // No-op.
                        }
                    }))
            .addState(Rule.from(FINISHED).to(DELETED).withCallback(manageTerminatedTasks))
            .addState(Rule.from(PREEMPTING).to(FINISHED, FAILED, KILLING, KILLED, LOST)
                    .withCallback(manageRestartingTask))
            .addState(Rule.from(RESTARTING).to(FINISHED, FAILED, KILLING, KILLED, LOST)
                    .withCallback(manageRestartingTask))
            .addState(Rule.from(DRAINING).to(FINISHED, FAILED, KILLING, KILLED, LOST)
                    .withCallback(manageRestartingTask))
            .addState(Rule.from(FAILED).to(DELETED).withCallback(manageTerminatedTasks))
            .addState(Rule.from(KILLED).to(DELETED).withCallback(manageTerminatedTasks))
            // TODO(maxim): Re-evaluate if *DELETED states are valid transitions here.
            .addState(Rule.from(KILLING).to(FINISHED, FAILED, KILLED, LOST, DELETED)
                    .withCallback(manageTerminatedTasks))
            .addState(Rule.from(LOST).to(DELETED).withCallback(manageTerminatedTasks))
            .addState(Rule.from(DELETED).noTransitions().withCallback(manageTerminatedTasks))
            // Since we want this action to be performed last in the transition sequence, the callback
            // must be the last chained transition callback.
            .onAnyTransition(new Consumer<Transition<TaskState>>() {
                @Override
                public void accept(final Transition<TaskState> transition) {
                    if (transition.isValidStateChange()) {
                        TaskState from = transition.getFrom();
                        TaskState to = transition.getTo();

                        // TODO(wfarner): Clean up this hack.  This is here to suppress unnecessary work
                        // (save followed by delete), but it shows a wart with this catch-all behavior.
                        // Strongly consider pushing the SAVE_STATE behavior to each transition handler.
                        boolean pendingDeleteHack = !((from == PENDING || from == THROTTLED) && to == KILLING);

                        // Don't bother saving state of a task that is being removed.
                        if (to != DELETED && pendingDeleteHack) {
                            addFollowup(SAVE_STATE);
                        }
                        previousState = Optional.of(from);
                    } else {
                        LOG.error("Illegal state transition attempted: " + transition);
                        ILLEGAL_TRANSITIONS.incrementAndGet();
                    }
                }
            })
            // TODO(wfarner): Consider alternatives to allow exceptions to surface.  This would allow
            // the state machine to surface illegal state transitions and propagate better information
            // to the caller.  As it stands, the caller must implement logic that really belongs in
            // the state machine.  For example, preventing RESTARTING->UPDATING transitions
            // (or for that matter, almost any user-initiated state transition) is awkward.
            .throwOnBadTransition(false).build();
}

From source file:com.spotify.helios.testing.TemporaryJobs.java

void after(final Optional<TemporaryJobReports.ReportWriter> writer) {
    final Optional<TemporaryJobReports.Step> undeploy = writer
            .transform(new Function<TemporaryJobReports.ReportWriter, TemporaryJobReports.Step>() {
                @Override
                public TemporaryJobReports.Step apply(final TemporaryJobReports.ReportWriter writer) {
                    return writer.step("undeploy");
                }
            });
    final List<JobId> jobIds = Lists.newArrayListWithCapacity(jobs.size());

    // Stop the test runner thread
    executor.shutdownNow();
    try {
        final boolean terminated = executor.awaitTermination(30, SECONDS);
        if (!terminated) {
            log.warn("Failed to stop test runner thread");
        }
    } catch (InterruptedException ignore) {
    }

    final List<AssertionError> errors = newArrayList();

    for (final TemporaryJob job : jobs) {
        jobIds.add(job.job().getId());
        job.undeploy(errors);
    }

    for (final TemporaryJobReports.Step step : undeploy.asSet()) {
        step.tag("jobs", jobIds);
    }

    for (final AssertionError error : errors) {
        log.error(error.getMessage());
    }

    // Don't delete the prefix file if any errors occurred during undeployment, so that we'll
    // try to undeploy them the next time TemporaryJobs is run.
    if (errors.isEmpty()) {
        jobPrefixFile.delete();
        for (final TemporaryJobReports.Step step : undeploy.asSet()) {
            step.markSuccess();
        }
    }

    for (final TemporaryJobReports.Step step : undeploy.asSet()) {
        step.finish();
    }
}