Example usage for com.google.common.collect Iterables isEmpty

Introduction

On this page you can find example usages of com.google.common.collect.Iterables.isEmpty, drawn from open-source projects.

Prototype

public static boolean isEmpty(Iterable<?> iterable) 

Document

Determines if the given iterable contains no elements.
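
A quick, hedged illustration before the project examples (the IsEmptyDemo class is made up for this page, not taken from any project below). Guava's implementation short-circuits: it calls Collection.isEmpty() when the argument is a Collection, and otherwise just asks iterator().hasNext(), so the iterable is never fully traversed.

import java.util.Arrays;
import java.util.Collections;

import com.google.common.collect.Iterables;

public class IsEmptyDemo {
    public static void main(String[] args) {
        Iterable<String> names = Arrays.asList("alpha", "beta");
        // false: the backing list has elements.
        System.out.println(Iterables.isEmpty(names));
        // true: nothing to iterate.
        System.out.println(Iterables.isEmpty(Collections.emptyList()));
    }
}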

Usage

From source file:org.apache.flex.compiler.internal.targets.SWFTarget.java

@Override
public ISWF build(Collection<ICompilerProblem> problems) {
    buildStarted();
    try {
        Iterable<ICompilerProblem> fatalProblems = getFatalProblems();
        if (!Iterables.isEmpty(fatalProblems)) {
            Iterables.addAll(problems, fatalProblems);
            return null;
        }

        Set<ICompilationUnit> compilationUnitSet = new HashSet<ICompilationUnit>();
        Target.RootedCompilationUnits rootedCompilationUnits = getRootedCompilationUnits();

        // No rooted compilation units could be found, but still create an empty SWF
        // in this error case.
        if (rootedCompilationUnits.getUnits().isEmpty())
            return buildEmptySWF();

        compilationUnitSet.addAll(rootedCompilationUnits.getUnits());

        this.problemCollection = problems;

        FramesInformation frames = getFramesInformation();

        BuiltCompilationUnitSet builtCompilationUnits = getBuiltCompilationUnitSet();
        Iterables.addAll(problems, builtCompilationUnits.problems);

        doPostBuildWork(builtCompilationUnits.compilationUnits, problems);

        ISWF swf = initializeSWF(getReachableCompilationUnitsInSWFOrder(rootedCompilationUnits.getUnits()));

        // Now that everything is built, the dependency graph is populated enough to do a
        // topological sort on all compilation units needed by this target.
        // The compilation units that define base classes must occur in the SWF before
        // compilation units that define classes that subclass those classes (see
        // inheritance dependencies in the {@link DependencyGraph}).
        Set<ICompilationUnit> emittedCompilationUnits = new HashSet<ICompilationUnit>();

        frames.createFrames(this, swf, builtCompilationUnits.compilationUnits, emittedCompilationUnits,
                problems);

        createLinkReport(problems);

        // "Link" the resulting swf, if the optimize flag is set
        return linkSWF(swf);
    } catch (BuildCanceledException bce) {
        return null;
    } catch (InterruptedException ie) {
        return null;
    } finally {
        buildFinished();
    }
}

From source file:com.facebook.buck.parser.AbstractParser.java

private TargetGraph buildTargetGraph(PerBuildState state, Iterable<BuildTarget> toExplore,
        AtomicLong processedBytes) throws IOException, InterruptedException, BuildFileParseException {

    if (Iterables.isEmpty(toExplore)) {
        return TargetGraph.EMPTY;
    }

    MutableDirectedGraph<TargetNode<?>> graph = new MutableDirectedGraph<>();
    Map<BuildTarget, TargetNode<?>> index = new HashMap<>();

    ParseEvent.Started parseStart = ParseEvent.started(toExplore);
    eventBus.post(parseStart);

    GraphTraversable<BuildTarget> traversable = target -> {
        TargetNode<?> node;
        try {
            node = state.getTargetNode(target);
        } catch (BuildFileParseException e) {
            throw new RuntimeException(e);
        }

        // This second lookup loop may *seem* pointless, but it allows us to report which node
        // is referring to a node we can't find - something that's very difficult in this
        // Traversable visitor pattern otherwise.
        // It's also work we need to do anyway: the getTargetNode() result is cached, so when
        // we come around and re-visit that node there won't actually be any work performed.
        for (BuildTarget dep : node.getParseDeps()) {
            try {
                state.getTargetNode(dep);
            } catch (BuildFileParseException e) {
                throw ParserMessages.createReadableExceptionWithWhenSuffix(target, dep, e);
            } catch (HumanReadableException e) {
                throw ParserMessages.createReadableExceptionWithWhenSuffix(target, dep, e);
            }
        }
        return node.getParseDeps().iterator();
    };

    AcyclicDepthFirstPostOrderTraversal<BuildTarget> targetNodeTraversal = new AcyclicDepthFirstPostOrderTraversal<>(
            traversable);

    TargetGraph targetGraph = null;
    try {
        for (BuildTarget target : targetNodeTraversal.traverse(toExplore)) {
            TargetNode<?> targetNode = state.getTargetNode(target);

            Preconditions.checkNotNull(targetNode, "No target node found for %s", target);
            assertTargetIsCompatible(state, targetNode);

            graph.addNode(targetNode);
            MoreMaps.putCheckEquals(index, target, targetNode);
            if (target.isFlavored()) {
                BuildTarget unflavoredTarget = target.withoutFlavors();
                MoreMaps.putCheckEquals(index, unflavoredTarget, state.getTargetNode(unflavoredTarget));
            }
            for (BuildTarget dep : targetNode.getParseDeps()) {
                graph.addEdge(targetNode, state.getTargetNode(dep));
            }
        }

        targetGraph = new TargetGraph(graph, ImmutableMap.copyOf(index));
        return targetGraph;
    } catch (AcyclicDepthFirstPostOrderTraversal.CycleException e) {
        throw new HumanReadableException(e.getMessage());
    } catch (RuntimeException e) {
        throw propagateRuntimeCause(e);
    } finally {
        eventBus.post(ParseEvent.finished(parseStart, processedBytes.get(), Optional.ofNullable(targetGraph)));
    }
}

From source file:com.google.devtools.build.lib.rules.objc.CrosstoolCompilationSupport.java

@Override
CompilationSupport registerLinkActions(ObjcProvider objcProvider,
        J2ObjcMappingFileProvider j2ObjcMappingFileProvider, J2ObjcEntryClassProvider j2ObjcEntryClassProvider,
        ExtraLinkArgs extraLinkArgs, Iterable<Artifact> extraLinkInputs, DsymOutputType dsymOutputType)
        throws InterruptedException {
    Iterable<Artifact> prunedJ2ObjcArchives = computeAndStripPrunedJ2ObjcArchives(j2ObjcEntryClassProvider,
            j2ObjcMappingFileProvider, objcProvider);
    ImmutableList<Artifact> bazelBuiltLibraries = Iterables.isEmpty(prunedJ2ObjcArchives)
            ? objcProvider.getObjcLibraries()
            : substituteJ2ObjcPrunedLibraries(objcProvider);

    Artifact inputFileList = intermediateArtifacts.linkerObjList();
    ImmutableSet<Artifact> forceLinkArtifacts = getForceLoadArtifacts(objcProvider);

    Iterable<Artifact> objFiles = Iterables.concat(bazelBuiltLibraries, objcProvider.get(IMPORTED_LIBRARY),
            objcProvider.getCcLibraries());
    // Clang loads archives specified in filelists and also specified as -force_load twice,
    // resulting in duplicate symbol errors unless they are deduped.
    objFiles = Iterables.filter(objFiles, Predicates.not(Predicates.in(forceLinkArtifacts)));

    registerObjFilelistAction(objFiles, inputFileList);

    LinkTargetType linkType = (objcProvider.is(Flag.USES_CPP)) ? LinkTargetType.OBJCPP_EXECUTABLE
            : LinkTargetType.OBJC_EXECUTABLE;

    ObjcVariablesExtension extension = new ObjcVariablesExtension.Builder().setRuleContext(ruleContext)
            .setObjcProvider(objcProvider).setConfiguration(ruleContext.getConfiguration())
            .setIntermediateArtifacts(intermediateArtifacts).setFrameworkNames(frameworkNames(objcProvider))
            .setLibraryNames(libraryNames(objcProvider))
            .setForceLoadArtifacts(getForceLoadArtifacts(objcProvider))
            .setAttributeLinkopts(attributes.linkopts())
            .addVariableCategory(VariableCategory.EXECUTABLE_LINKING_VARIABLES).build();

    Artifact binaryToLink = getBinaryToLink();
    CppLinkAction executableLinkAction = new CppLinkActionBuilder(ruleContext, binaryToLink)
            .setMnemonic("ObjcLink").addActionInputs(bazelBuiltLibraries)
            .addActionInputs(objcProvider.getCcLibraries())
            .addTransitiveActionInputs(objcProvider.get(IMPORTED_LIBRARY))
            .addTransitiveActionInputs(objcProvider.get(STATIC_FRAMEWORK_FILE))
            .addTransitiveActionInputs(objcProvider.get(DYNAMIC_FRAMEWORK_FILE))
            .setCrosstoolInputs(CppHelper.getToolchain(ruleContext).getLink())
            .addActionInputs(prunedJ2ObjcArchives).addActionInput(inputFileList).setLinkType(linkType)
            .setLinkStaticness(LinkStaticness.FULLY_STATIC).addVariablesExtension(extension)
            .setFeatureConfiguration(getFeatureConfiguration(ruleContext)).build();
    ruleContext.registerAction(executableLinkAction);

    return this;
}

From source file:google.registry.rdap.RdapDomainSearchAction.java

/** Searches for domains by nameserver name, returning a JSON array of domain info maps. */
private RdapSearchResults searchByNameserverLdhName(final RdapSearchPattern partialStringQuery,
        final DateTime now) {
    Iterable<Key<HostResource>> hostKeys = getNameserverRefsByLdhName(partialStringQuery, now);
    if (Iterables.isEmpty(hostKeys)) {
        throw new NotFoundException("No matching nameservers found");
    }
    return searchByNameserverRefs(hostKeys, now);
}

From source file:org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy.java

@SuppressWarnings("resource")
public Collection<AbstractCompactionTask> getMaximalTask(final int gcBefore, boolean splitOutput) {
    Iterable<SSTableReader> filteredSSTables = filterSuspectSSTables(sstables);
    if (Iterables.isEmpty(filteredSSTables))
        return null;
    LifecycleTransaction txn = cfs.getTracker().tryModify(filteredSSTables, OperationType.COMPACTION);
    if (txn == null)
        return null;
    if (splitOutput)
        return Arrays.<AbstractCompactionTask>asList(new SplittingCompactionTask(cfs, txn, gcBefore, false));
    return Arrays.<AbstractCompactionTask>asList(new CompactionTask(cfs, txn, gcBefore, false));
}

From source file:eu.interedition.text.repository.JdbcStore.java

@Override
public void deleteAnnotations(Iterable<Long> ids) {
    if (Iterables.isEmpty(ids)) {
        return;
    }

    try {
        final Long[] idArray = Iterables.toArray(ids, Long.class);
        if (deleteAnnotations == null) {
            deleteAnnotations = connection
                    .prepareStatement("delete from interedition_text_annotation a where a.id in "
                            + "(select id from table(id bigint = ?) ids)");
        }
        deleteAnnotations.setObject(1, idArray);
        deleteAnnotations.executeUpdate();

        txLog.annotationsRemoved(idArray);
    } catch (SQLException e) {
        throw Throwables.propagate(e);
    }
}

From source file:com.google.javascript.rhino.TypeDeclarationsIR.java

/**
 * Represents a union type, which can be one of the given types.
 * Closure accepts syntax like {@code {(number|boolean)}}
 *
 * <p>Example:
 * <pre>
 * UNION_TYPE
 *   NUMBER_TYPE
 *   BOOLEAN_TYPE
 * </pre>
 * @param options the types which are accepted
 * @return a new node representing the union type
 */
public static TypeDeclarationNode unionType(Iterable<TypeDeclarationNode> options) {
    Preconditions.checkArgument(!Iterables.isEmpty(options), "union must have at least one option");
    TypeDeclarationNode node = new TypeDeclarationNode(Token.UNION_TYPE);
    for (Node option : options) {
        node.addChildToBack(option);
    }
    return node;
}
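
A possible call site for the factory above, building the {(number|boolean)} union from the Javadoc. The numberType() and booleanType() factories are assumed to be sibling helpers in this class; treat this as a sketch rather than project code:

TypeDeclarationNode union = unionType(
        ImmutableList.of(numberType(), booleanType()));
// An empty options list would fail the Preconditions.checkArgument above.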

From source file:com.jeffjirsa.cassandra.db.compaction.SizeTieredCompactionStrategy.java

@SuppressWarnings("resource")
public Collection<AbstractCompactionTask> getMaximalTask(final int gcBefore, boolean splitOutput) {
    Iterable<SSTableReader> filteredSSTables = filterSuspectSSTables(sstables);
    if (Iterables.isEmpty(filteredSSTables))
        return null;
    LifecycleTransaction txn = cfs.getTracker().tryModify(filteredSSTables, OperationType.COMPACTION);
    if (txn == null)
        return null;
    if (splitOutput)
        return Arrays.<AbstractCompactionTask>asList(new SplittingCompactionTask(cfs, txn, gcBefore));
    return Arrays.<AbstractCompactionTask>asList(new CompactionTask(cfs, txn, gcBefore));
}

From source file:org.apache.aurora.scheduler.cron.quartz.AuroraCronJob.java

@VisibleForTesting
void doExecute(JobExecutionContext context) throws JobExecutionException {
    final IJobKey key = Quartz.auroraJobKey(context.getJobDetail().getKey());
    final String path = JobKeys.canonicalString(key);

    // Prevent a concurrent run for this job in case a previous trigger took longer to run.
    // This approach relies on saving the "work in progress" token within the job context itself
    // (see below) and relying on killFollowups to signal "work completion".
    if (context.getJobDetail().getJobDataMap().containsKey(path)) {
        CRON_JOB_CONCURRENT_RUNS.incrementAndGet();
        if (killFollowups.contains(key)) {
            context.getJobDetail().getJobDataMap().remove(path);
            killFollowups.remove(key);
            LOG.info("Resetting job context for cron {}", path);
        } else {
            LOG.info("Ignoring trigger as another concurrent run is active for cron {}", path);
            return;
        }
    }

    CompletableFuture<NoResult> scheduleResult = batchWorker.<NoResult>execute(storeProvider -> {
        Optional<IJobConfiguration> config = storeProvider.getCronJobStore().fetchJob(key);
        if (!config.isPresent()) {
            LOG.warn("Cron was triggered for {} but no job with that key was found in storage.", path);
            CRON_JOB_MISFIRES.incrementAndGet();
            return BatchWorker.NO_RESULT;
        }

        SanitizedCronJob cronJob;
        try {
            cronJob = SanitizedCronJob.from(new SanitizedConfiguration(config.get()));
        } catch (CronException e) {
            LOG.warn("Invalid cron job for {} in storage - failed to parse", key, e);
            CRON_JOB_PARSE_FAILURES.incrementAndGet();
            return BatchWorker.NO_RESULT;
        }

        CronCollisionPolicy collisionPolicy = cronJob.getCronCollisionPolicy();
        LOG.info("Cron triggered for {} at {} with policy {}", path, new Date(), collisionPolicy);
        CRON_JOB_TRIGGERS.incrementAndGet();

        final Query.Builder activeQuery = Query.jobScoped(key).active();
        Set<String> activeTasks = Tasks.ids(storeProvider.getTaskStore().fetchTasks(activeQuery));

        ITaskConfig task = cronJob.getSanitizedConfig().getJobConfig().getTaskConfig();
        Set<Integer> instanceIds = cronJob.getSanitizedConfig().getInstanceIds();
        if (activeTasks.isEmpty()) {
            stateManager.insertPendingTasks(storeProvider, task, instanceIds);
            return BatchWorker.NO_RESULT;
        }

        CRON_JOB_COLLISIONS.incrementAndGet();
        switch (collisionPolicy) {
        case KILL_EXISTING:
            for (String taskId : activeTasks) {
                stateManager.changeState(storeProvider, taskId, Optional.absent(), KILLING, KILL_AUDIT_MESSAGE);
            }

            LOG.info("Waiting for job to terminate before launching cron job " + path);
            // Use job detail map to signal a "work in progress" condition to subsequent triggers.
            context.getJobDetail().getJobDataMap().put(path, null);
            batchWorker.executeWithReplay(delayedStartBackoff.getBackoffStrategy(), store -> {
                Query.Builder query = Query.taskScoped(activeTasks).active();
                if (Iterables.isEmpty(storeProvider.getTaskStore().fetchTasks(query))) {
                    LOG.info("Initiating delayed launch of cron " + path);
                    stateManager.insertPendingTasks(store, task, instanceIds);
                    return new BatchWorker.Result<>(true, null);
                } else {
                    LOG.info("Not yet safe to run cron " + path);
                    return new BatchWorker.Result<>(false, null);
                }
            }).thenAccept(ignored -> {
                killFollowups.add(key);
                LOG.info("Finished delayed launch for cron " + path);
            });
            break;

        case RUN_OVERLAP:
            LOG.error("Ignoring trigger for job {} with deprecated collision"
                    + "policy RUN_OVERLAP due to unterminated active tasks.", path);
            break;

        case CANCEL_NEW:
            break;

        default:
            LOG.error("Unrecognized cron collision policy: " + collisionPolicy);
        }
        return BatchWorker.NO_RESULT;
    });

    try {
        scheduleResult.get();
    } catch (ExecutionException | InterruptedException e) {
        LOG.warn("Interrupted while trying to launch cron " + path, e);
        Thread.currentThread().interrupt();
        throw new JobExecutionException(e);
    }
}

From source file:org.apache.aurora.scheduler.async.preemptor.PreemptorImpl.java

/**
 * Optional.absent indicates that this slave does not have enough resources to satisfy the task.
 * The empty set indicates that the offers (slack) alone are enough.
 * A non-empty set indicates that preempting those tasks, together with the offers, is enough.
 */
private Optional<Set<String>> getTasksToPreempt(Iterable<PreemptionVictim> possibleVictims,
        Iterable<HostOffer> offers, IAssignedTask pendingTask, AttributeAggregate jobState) {

    // This enforces the precondition that all of the resources are from the same host. We need to
    // get the host for the schedulingFilter.
    Set<String> hosts = ImmutableSet.<String>builder()
            .addAll(Iterables.transform(possibleVictims, VICTIM_TO_HOST))
            .addAll(Iterables.transform(offers, OFFER_TO_HOST)).build();

    String host = Iterables.getOnlyElement(hosts);

    ResourceSlot slackResources = ResourceSlot.sum(Iterables.transform(offers, OFFER_TO_RESOURCE_SLOT));

    if (!Iterables.isEmpty(offers)) {
        if (Iterables.size(offers) > 1) {
            // There are multiple offers for the same host. Since both carry maintenance
            // information, we don't preempt using that information and instead wait for
            // Mesos to merge the two offers for us.
            return Optional.absent();
        }
        IHostAttributes attributes = Iterables
                .getOnlyElement(FluentIterable.from(offers).transform(OFFER_TO_ATTRIBUTES).toSet());

        Set<SchedulingFilter.Veto> vetoes = schedulingFilter.filter(
                new UnusedResource(slackResources, attributes),
                new ResourceRequest(pendingTask.getTask(), pendingTask.getTaskId(), jobState));

        if (vetoes.isEmpty()) {
            return Optional.<Set<String>>of(ImmutableSet.<String>of());
        }
    }

    FluentIterable<PreemptionVictim> preemptableTasks = FluentIterable.from(possibleVictims)
            .filter(preemptionFilter(pendingTask.getTask()));

    if (preemptableTasks.isEmpty()) {
        return Optional.absent();
    }

    List<PreemptionVictim> toPreemptTasks = Lists.newArrayList();

    Iterable<PreemptionVictim> sortedVictims = RESOURCE_ORDER.immutableSortedCopy(preemptableTasks);

    for (PreemptionVictim victim : sortedVictims) {
        toPreemptTasks.add(victim);

        ResourceSlot totalResource = ResourceSlot.sum(
                ResourceSlot.sum(Iterables.transform(toPreemptTasks, VICTIM_TO_RESOURCES)), slackResources);

        Optional<IHostAttributes> attributes = getHostAttributes(host);
        if (!attributes.isPresent()) {
            missingAttributes.incrementAndGet();
            continue;
        }

        Set<SchedulingFilter.Veto> vetoes = schedulingFilter.filter(
                new UnusedResource(totalResource, attributes.get()),
                new ResourceRequest(pendingTask.getTask(), pendingTask.getTaskId(), jobState));

        if (vetoes.isEmpty()) {
            Set<String> taskIds = FluentIterable.from(toPreemptTasks).transform(VICTIM_TO_TASK_ID).toSet();
            return Optional.of(taskIds);
        }
    }
    return Optional.absent();
}
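
The three-way contract documented above might be consumed along these lines; the caller, the kill(...) helper, and the surrounding scheduling loop are hypothetical, sketched only to make the Optional semantics concrete:

Optional<Set<String>> toPreempt = getTasksToPreempt(victims, offers, pendingTask, jobState);
if (!toPreempt.isPresent()) {
    // This slave cannot fit the task even with preemption; try another host.
} else if (toPreempt.get().isEmpty()) {
    // The slack in the offers alone is enough; nothing needs to be killed.
} else {
    for (String taskId : toPreempt.get()) {
        kill(taskId); // hypothetical helper
    }
}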