Example usage for com.google.common.collect Iterables skip

Introduction

On this page you can find example usages of com.google.common.collect Iterables.skip.

Prototype

public static <T> Iterable<T> skip(final Iterable<T> iterable, final int numberToSkip) 

Document

Returns a view of iterable that skips its first numberToSkip elements.
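
A minimal, self-contained sketch (not taken from any of the projects below; the class and variable names are illustrative) of the behaviour described above: skip returns a lazy view over the source iterable, so nothing is copied and later changes to the source are visible through the view.

import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Illustrative demo; not from any of the projects listed below.
public class IterablesSkipDemo {
    public static void main(String[] args) {
        List<String> letters = new ArrayList<>(Arrays.asList("a", "b", "c"));

        // A view that omits the first element; nothing is copied.
        Iterable<String> tail = Iterables.skip(letters, 1);
        System.out.println(ImmutableList.copyOf(tail)); // [b, c]

        // The view is lazy: elements added to the source later are visible through it.
        letters.add("d");
        System.out.println(ImmutableList.copyOf(tail)); // [b, c, d]

        // Skipping more elements than exist yields an empty view rather than failing.
        System.out.println(ImmutableList.copyOf(Iterables.skip(letters, 10))); // []
    }
}

The same lazy-view property is why several of the examples below wrap the result in ImmutableList.copyOf(...) when they want a fixed snapshot rather than a live view.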

Usage

From source file:com.google.devtools.build.lib.analysis.actions.SpawnAction.java

/**
 * Returns the (immutable) list of arguments, excluding the command name,
 * argv[0].
 */
@VisibleForTesting
public List<String> getRemainingArguments() {
    return ImmutableList.copyOf(Iterables.skip(argv.arguments(), 1));
}

From source file:com.google.javascript.jscomp.AstFactory.java

private Node createQName(Scope scope, Iterable<String> names) {
    String baseName = checkNotNull(Iterables.getFirst(names, null));
    Iterable<String> propertyNames = Iterables.skip(names, 1);
    Node baseNameNode = createName(scope, baseName);
    return createGetProps(baseNameNode, propertyNames);
}

From source file:io.prestosql.operator.WindowOperator.java

public WindowOperator(OperatorContext operatorContext, List<Type> sourceTypes, List<Integer> outputChannels,
        List<WindowFunctionDefinition> windowFunctionDefinitions, List<Integer> partitionChannels,
        List<Integer> preGroupedChannels, List<Integer> sortChannels, List<SortOrder> sortOrder,
        int preSortedChannelPrefix, int expectedPositions, PagesIndex.Factory pagesIndexFactory) {
    requireNonNull(operatorContext, "operatorContext is null");
    requireNonNull(outputChannels, "outputChannels is null");
    requireNonNull(windowFunctionDefinitions, "windowFunctionDefinitions is null");
    requireNonNull(partitionChannels, "partitionChannels is null");
    requireNonNull(preGroupedChannels, "preGroupedChannels is null");
    checkArgument(partitionChannels.containsAll(preGroupedChannels),
            "preGroupedChannels must be a subset of partitionChannels");
    requireNonNull(sortChannels, "sortChannels is null");
    requireNonNull(sortOrder, "sortOrder is null");
    requireNonNull(pagesIndexFactory, "pagesIndexFactory is null");
    checkArgument(sortChannels.size() == sortOrder.size(),
            "Must have same number of sort channels as sort orders");
    checkArgument(preSortedChannelPrefix <= sortChannels.size(),
            "Cannot have more pre-sorted channels than specified sorted channels");
    checkArgument(
            preSortedChannelPrefix == 0
                    || ImmutableSet.copyOf(preGroupedChannels).equals(ImmutableSet.copyOf(partitionChannels)),
            "preSortedChannelPrefix can only be greater than zero if all partition channels are pre-grouped");

    this.operatorContext = operatorContext;
    this.localUserMemoryContext = operatorContext.localUserMemoryContext();
    this.outputChannels = Ints.toArray(outputChannels);
    this.windowFunctions = windowFunctionDefinitions.stream()
            .map(functionDefinition -> new FramedWindowFunction(functionDefinition.createWindowFunction(),
                    functionDefinition.getFrameInfo()))
            .collect(toImmutableList());

    List<Type> types = Stream
            .concat(outputChannels.stream().map(sourceTypes::get),
                    windowFunctionDefinitions.stream().map(WindowFunctionDefinition::getType))
            .collect(toImmutableList());

    this.pagesIndex = pagesIndexFactory.newPagesIndex(sourceTypes, expectedPositions);
    this.preGroupedChannels = Ints.toArray(preGroupedChannels);
    this.preGroupedPartitionHashStrategy = pagesIndex.createPagesHashStrategy(preGroupedChannels,
            OptionalInt.empty());
    List<Integer> unGroupedPartitionChannels = partitionChannels.stream()
            .filter(channel -> !preGroupedChannels.contains(channel)).collect(toImmutableList());
    this.unGroupedPartitionHashStrategy = pagesIndex.createPagesHashStrategy(unGroupedPartitionChannels,
            OptionalInt.empty());
    List<Integer> preSortedChannels = sortChannels.stream().limit(preSortedChannelPrefix)
            .collect(toImmutableList());
    this.preSortedPartitionHashStrategy = pagesIndex.createPagesHashStrategy(preSortedChannels,
            OptionalInt.empty());
    this.peerGroupHashStrategy = pagesIndex.createPagesHashStrategy(sortChannels, OptionalInt.empty());

    this.pageBuilder = new PageBuilder(types);

    if (preSortedChannelPrefix > 0) {
        // This already implies that set(preGroupedChannels) == set(partitionChannels) (enforced with checkArgument)
        this.orderChannels = ImmutableList.copyOf(Iterables.skip(sortChannels, preSortedChannelPrefix));
        this.ordering = ImmutableList.copyOf(Iterables.skip(sortOrder, preSortedChannelPrefix));
    } else {
        // Otherwise, we need to sort by the unGroupedPartitionChannels and all original sort channels
        this.orderChannels = ImmutableList.copyOf(concat(unGroupedPartitionChannels, sortChannels));
        this.ordering = ImmutableList
                .copyOf(concat(nCopies(unGroupedPartitionChannels.size(), ASC_NULLS_LAST), sortOrder));
    }

    windowInfo = new WindowInfo.DriverWindowInfoBuilder();
    operatorContext.setInfoSupplier(this::getWindowInfo);
}

From source file:com.wrmsr.wava.yen.parser.ModuleFactory.java

private void parseFunctionType(ListElement s) {
    int i = 1;
    Optional<Name> name = Optional.empty();
    Element first = s.get(i);
    if (first instanceof StringElement) {
        name = Optional.of(Name.of(first.string()));
        i++;
    }
    Type result = Type.NONE;
    List<Type> params = new ArrayList<>();
    for (Element e : Iterables.skip(s.get(i).list(), 1)) {
        ListElement curr = (ListElement) e;
        String str = curr.get(0).string();
        if (str.equals(PARAM)) {
            for (int j = 1; j < curr.size(); j++) {
                params.add(Type.of(curr.get(j).string()));
            }
        } else if (str.equals(RESULT)) {
            result = Type.of(curr.get(1).string());
        }
    }
    NamedFunctionType ft = new NamedFunctionType(name, result, params);
    builder.addFunctionType(ft);
}

From source file:com.facebook.buck.cxx.PreprocessorDelegate.java

public String hashCommand(ImmutableList<String> flags) {
    Hasher hasher = Hashing.murmur3_128().newHasher();
    String workingDirString = workingDir.toString();
    // Skips the executable argument (the first one) as that is not sanitized.
    for (String part : sanitizer.sanitizeFlags(Iterables.skip(flags, 1))) {
        // TODO(#10251354): find a better way of dealing with getting a project dir normalized hash
        if (part.startsWith(workingDirString)) {
            part = "<WORKINGDIR>" + part.substring(workingDirString.length());
        }
        hasher.putString(part, Charsets.UTF_8);
        hasher.putBoolean(false); // separator
    }
    return hasher.hash().toString();
}

From source file:com.google.gerrit.server.project.ProjectState.java

/**
 * @return an iterable that walks through the parents of this project. Starts
 *         from the immediate parent of this project and progresses up the
 *         hierarchy to All-Projects.
 */
public Iterable<ProjectState> parents() {
    return Iterables.skip(tree(), 1);
}

From source file:com.continuuity.loom.scheduler.JobScheduler.java

ClusterTask scheduleRetry(ClusterJob job, ClusterTask task, String queueName) throws Exception {
    // Schedule rollback task before retrying
    scheduleRollbackTask(task, queueName);

    task.addAttempt();
    List<ClusterTask> retryTasks = taskService.getRetryTask(task);

    if (retryTasks.size() == 1) {
        LOG.trace("Only one retry task for job {} for task {}", job, task);
        return retryTasks.get(0);
    }

    // store all retry tasks
    for (ClusterTask t : retryTasks) {
        clusterStore.writeClusterTask(t);
    }

    // Remove self from current stage
    job.getCurrentStage().remove(task.getTaskId());
    // Add first retry task to current stage
    job.getCurrentStage().add(retryTasks.get(0).getTaskId());
    // Add the rest of retry tasks after current stage. TODO: this needs to be revisited.
    job.insertTasksAfterCurrentStage(ImmutableList
            .copyOf(Iterables.transform(Iterables.skip(retryTasks, 1), CLUSTER_TASK_STRING_FUNCTION)));
    LOG.trace("Retry job {} for task {}", job, task);

    return retryTasks.get(0);
}

From source file:org.immutables.sequence.Sequence.java

/**
 * Returns a view of this fluent iterable that skips its first {@code numberToSkip} elements. If
 * this fluent iterable contains fewer than {@code numberToSkip} elements,
 * the returned fluent iterable skips all of its elements.
 * <p>
 * Modifications to this fluent iterable before a call to {@code iterator()} are reflected in the
 * returned fluent iterable. That is, its iterator skips the first {@code numberToSkip}
 * elements that exist when the iterator is created, not when {@code skip()} is called.
 * <p>
 * The returned fluent iterable's iterator supports {@code remove()} if the {@code Iterator} of
 * this fluent iterable supports it. Note that it is <i>not</i> possible to delete the last
 * skipped element by immediately calling {@code remove()} on the returned fluent iterable's
 * iterator, as the {@code Iterator} contract states that a call to {@code remove()} before a
 * call to {@code next()} will throw an {@link IllegalStateException}.
 * @param numberToSkip the number to skip
 * @return the sequence
 */
@CheckReturnValue
public final Sequence<E> skip(int numberToSkip) {
    return from(Iterables.skip(iterable, numberToSkip));
}
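
Since this method simply delegates to Iterables.skip, the remove() caveat in the comment above can be demonstrated directly with Guava. A short sketch with made-up data (the class name is illustrative): the skipping iterator supports remove() when the backing iterator does, but only for elements it has already returned from next().

import com.google.common.collect.Iterables;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

// Illustrative demo; not from any of the projects listed on this page.
public class SkipRemoveDemo {
    public static void main(String[] args) {
        List<String> letters = new ArrayList<>(Arrays.asList("a", "b", "c", "d"));

        Iterator<String> it = Iterables.skip(letters, 2).iterator();
        // Calling it.remove() here would throw IllegalStateException: next() has not been
        // called yet, so the last skipped element ("b") cannot be deleted through this iterator.
        String first = it.next(); // "c"
        it.remove();              // removes "c" from the backing list
        System.out.println(first + " " + letters); // c [a, b, d]
    }
}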

From source file:org.apache.mahout.clustering.streaming.cluster.BallKMeans.java

/**
 * Selects some of the original points according to the k-means++ algorithm.  The basic idea is that
 * points are selected with probability proportional to their distance from any selected point.  In
 * this version, points have weights which multiply their likelihood of being selected.  This is the
 * same as if there were as many copies of the same point as indicated by the weight.
 *
 * This is pretty expensive, but it vastly improves the quality and convergence of the k-means algorithm.
 * The basic idea can be made much faster by only processing a random subset of the original points.
 * In the context of streaming k-means, the total number of possible seeds will be about k log n so this
 * selection will cost O(k^2 (log n)^2) which isn't much worse than the random sampling idea.  At
 * n = 10^9, the cost of this initialization will be about 10x worse than a reasonable random sampling
 * implementation.
 *
 * The side effect of this method is to fill the centroids structure itself.
 *
 * @param datapoints The datapoints to select from.  These datapoints should be WeightedVectors of some kind.
 */
private void initializeSeedsKMeansPlusPlus(List<? extends WeightedVector> datapoints) {
    Preconditions.checkArgument(datapoints.size() > 1,
            "Must have at least two datapoints to cluster sensibly");
    Preconditions.checkArgument(datapoints.size() >= numClusters,
            String.format("Must have more datapoints [%d] than clusters [%d]", datapoints.size(), numClusters));
    // Compute the centroid of all of the datapoints.  This is then used to compute the squared radius of the datapoints.
    Centroid center = new Centroid(datapoints.iterator().next());
    for (WeightedVector row : Iterables.skip(datapoints, 1)) {
        center.update(row);
    }

    // Given the centroid, we can compute \Delta_1^2(X), the total squared distance for the datapoints;
    // this accelerates seed selection.
    double deltaX = 0;
    DistanceMeasure distanceMeasure = centroids.getDistanceMeasure();
    for (WeightedVector row : datapoints) {
        deltaX += distanceMeasure.distance(row, center);
    }

    // Find the first seed c_1 (and conceptually the second, c_2) as might be done in the 2-means clustering so that
    // the probability of selecting c_1 and c_2 is proportional to || c_1 - c_2 ||^2.  This is done
    // by first selecting c_1 with probability:
    //
    // p(c_1) = sum_{c_1} || c_1 - c_2 ||^2 \over sum_{c_1, c_2} || c_1 - c_2 ||^2
    //
    // This can be simplified to:
    //
    // p(c_1) = \Delta_1^2(X) + n || c_1 - c ||^2 / (2 n \Delta_1^2(X))
    //
    // where c = \sum x / n and \Delta_1^2(X) = sum || x - c ||^2
    //
    // All subsequent seeds c_i (including c_2) can then be selected from the remaining points with probability
    // proportional to Pr(c_i == x_j) = min_{m < i} || c_m - x_j ||^2.

    // Multinomial distribution of vector indices for the selection seeds. These correspond to
    // the indices of the vectors in the original datapoints list.
    Multinomial<Integer> seedSelector = new Multinomial<Integer>();
    for (int i = 0; i < datapoints.size(); ++i) {
        double selectionProbability = deltaX
                + datapoints.size() * distanceMeasure.distance(datapoints.get(i), center);
        seedSelector.add(i, selectionProbability);
    }

    int selected = random.nextInt(datapoints.size());
    Centroid c_1 = new Centroid(datapoints.get(selected).clone());
    c_1.setIndex(0);
    // Construct a set of weighted things which can be used for random selection.  Initial weights are
    // set to the squared distance from c_1
    for (int i = 0; i < datapoints.size(); ++i) {
        WeightedVector row = datapoints.get(i);
        double w = distanceMeasure.distance(c_1, row) * 2 * Math.log(1 + row.getWeight());
        seedSelector.set(i, w);
    }

    // From here, seeds are selected with probability proportional to:
    //
    // r_i = min_{c_j} || x_i - c_j ||^2
    //
    // when we only have c_1, we have already set these distances and as we select each new
    // seed, we update the minimum distances.
    centroids.add(c_1);
    int clusterIndex = 1;
    while (centroids.size() < numClusters) {
        // Select according to weights.
        int seedIndex = seedSelector.sample();
        Centroid nextSeed = new Centroid(datapoints.get(seedIndex));
        nextSeed.setIndex(clusterIndex++);
        centroids.add(nextSeed);
        // Don't select this one again.
        seedSelector.delete(seedIndex);
        // Re-weight everything according to the minimum distance to a seed.
        for (int currSeedIndex : seedSelector) {
            WeightedVector curr = datapoints.get(currSeedIndex);
            double newWeight = nextSeed.getWeight() * distanceMeasure.distance(nextSeed, curr);
            if (newWeight < seedSelector.getWeight(currSeedIndex)) {
                seedSelector.set(currSeedIndex, newWeight);
            }
        }
    }
}

From source file:org.apache.aurora.scheduler.thrift.ReadOnlySchedulerImpl.java

private List<ScheduledTask> getTasks(TaskQuery query) {
    requireNonNull(query);

    Iterable<IScheduledTask> tasks = Storage.Util.fetchTasks(storage, Query.arbitrary(query));
    if (query.getOffset() > 0) {
        tasks = Iterables.skip(tasks, query.getOffset());
    }
    if (query.getLimit() > 0) {
        tasks = Iterables.limit(tasks, query.getLimit());
    }

    return IScheduledTask.toBuildersList(tasks);
}