Example usage for com.google.common.collect Iterables skip

Introduction

This page collects usage examples for com.google.common.collect Iterables.skip drawn from open-source projects.

Prototype

public static <T> Iterable<T> skip(final Iterable<T> iterable, final int numberToSkip) 

Document

Returns a view of iterable that skips its first numberToSkip elements. If iterable contains fewer than numberToSkip elements, the returned iterable skips all of its elements.
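
A minimal sketch of calling skip directly (class and variable names are illustrative):

import com.google.common.collect.Iterables;

import java.util.Arrays;
import java.util.List;

public class SkipDemo {
    public static void main(String[] args) {
        List<String> letters = Arrays.asList("a", "b", "c", "d");
        // skip returns a lazy view over the backing iterable; no copy is made
        Iterable<String> tail = Iterables.skip(letters, 2);
        System.out.println(Iterables.toString(tail)); // [c, d]
    }
}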

Usage

From source file:com.smoketurner.notification.application.store.NotificationStore.java

/**
 * Returns an iterable that skips forward to a given notification ID, then
 * returns at most limitSize more notifications. If the given notification
 * ID is not found, the first limitSize notifications are returned.
 * 
 * @param notifications
 *            Iterable of notifications
 * @param startId
 *            notification ID to start at
 * @param inclusive
 *            Whether to include the startId notification or not
 * @param limitSize
 *            Number of notifications to return
 * @return Iterable containing the subset of the original notifications
 */
public Iterable<Notification> skip(@Nonnull final Iterable<Notification> notifications, final long startId,
        final boolean inclusive, final int limitSize) {
    Objects.requireNonNull(notifications);
    final int position = indexOf(notifications, startId);
    if (position == -1) {
        return Iterables.limit(notifications, limitSize);
    }
    if (inclusive) {
        return Iterables.limit(Iterables.skip(notifications, position), limitSize);
    }
    return Iterables.limit(Iterables.skip(notifications, position + 1), limitSize);
}
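
The Iterables.skip + Iterables.limit composition above is the usual Guava idiom for offset-based paging. A generic helper might look like this (class and method names are hypothetical):

import com.google.common.collect.Iterables;

public final class Pages {
    // Hypothetical helper: lazily view one page of an iterable using the
    // same skip + limit composition as the method above.
    public static <T> Iterable<T> page(Iterable<T> items, int offset, int pageSize) {
        return Iterables.limit(Iterables.skip(items, offset), pageSize);
    }
}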

From source file:org.opendaylight.yangtools.yang.model.util.SchemaContextUtil.java

private static Iterable<QName> nextLevel(final Iterable<QName> path) {
    return Iterables.skip(path, 1);
}
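
Here skip(path, 1) drops the head of the path; applied repeatedly, the same idiom walks an iterable one level at a time. A self-contained sketch with plain strings (names are illustrative):

import com.google.common.collect.Iterables;

import java.util.Arrays;

public class PathWalk {
    public static void main(String[] args) {
        Iterable<String> path = Arrays.asList("module", "container", "leaf");
        while (!Iterables.isEmpty(path)) {
            // visit the current head, then drop it to descend one level
            System.out.println(Iterables.getFirst(path, null));
            path = Iterables.skip(path, 1);
        }
    }
}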

From source file:com.facebook.presto.sql.analyzer.TupleAnalyzer.java

@Override
protected TupleDescriptor visitUnion(Union node, AnalysisContext context) {
    checkState(node.getRelations().size() >= 2);

    TupleAnalyzer analyzer = new TupleAnalyzer(analysis, session, metadata, sqlParser,
            experimentalSyntaxEnabled);

    // Use the first descriptor as the output descriptor for the UNION
    TupleDescriptor outputDescriptor = analyzer.process(node.getRelations().get(0), context)
            .withOnlyVisibleFields();

    for (Relation relation : Iterables.skip(node.getRelations(), 1)) {
        TupleDescriptor descriptor = analyzer.process(relation, context).withOnlyVisibleFields();
        int outputFieldSize = outputDescriptor.getVisibleFields().size();
        int descFieldSize = descriptor.getVisibleFields().size();
        if (outputFieldSize != descFieldSize) {
            throw new SemanticException(MISMATCHED_SET_COLUMN_TYPES, node,
                    "union query has different number of fields: %d, %d", outputFieldSize, descFieldSize);
        }
        for (int i = 0; i < descriptor.getVisibleFields().size(); i++) {
            Type outputFieldType = outputDescriptor.getFieldByIndex(i).getType();
            Type descFieldType = descriptor.getFieldByIndex(i).getType();
            if (!outputFieldType.equals(descFieldType)) {
                throw new SemanticException(TYPE_MISMATCH, node,
                        "column %d in union query has incompatible types: %s, %s", i,
                        outputFieldType.getDisplayName(), descFieldType.getDisplayName());
            }
        }
    }

    analysis.setOutputDescriptor(node, outputDescriptor);
    return outputDescriptor;
}

From source file:org.opensha2.geo.Region.java

void initBuffered(LocationList line, double buffer) {
    checkNotNull(line, "Supplied LocationList is null");
    checkArgument((buffer > 0 && buffer <= 500), "Buffer [%s] is out of [0 500] km range", buffer);

    // init an Area with first point
    Area area = areaFromBorder(locationCircle(line.first(), buffer));
    // for each subsequent segment, create a box
    // for each subsequent point, create a circle
    Location prevLoc = line.first();
    for (Location loc : Iterables.skip(line, 1)) {
        area.add(areaFromBorder(locationBox(prevLoc, loc, buffer)));
        area.add(areaFromBorder(locationCircle(loc, buffer)));
        prevLoc = loc;
    }
    this.area = area;
    this.border = borderFromArea(area, true);
}

From source file:org.jclouds.vcloud.director.v1_5.compute.strategy.VCloudDirectorComputeServiceAdapter.java

private VirtualHardwareSection updateVirtualHardwareSectionDisk(VirtualHardwareSection virtualHardwareSection,
        Predicate<ResourceAllocationSettingData> predicate, final BigInteger capacity) {
    return updateVirtualHardwareSection(virtualHardwareSection, predicate,
            new Function<ResourceAllocationSettingData, ResourceAllocationSettingData>() {
                @Override
                public ResourceAllocationSettingData apply(ResourceAllocationSettingData input) {
                    Set<CimString> oldHostResources = input.getHostResources();
                    CimString oldHostResource = (oldHostResources != null)
                            ? Iterables.getFirst(oldHostResources, null)
                            : null;
                    if (oldHostResource != null) {
                        boolean overriddenCapacity = false;
                        Map<QName, String> oldHostResourceAttribs = oldHostResource.getOtherAttributes();
                        Map<QName, String> newHostResourceAttribs = Maps.newLinkedHashMap();
                        for (Map.Entry<QName, String> entry : oldHostResourceAttribs.entrySet()) {
                            QName key = entry.getKey();
                            String val = entry.getValue();
                            if ("capacity".equals(key.getLocalPart())) {
                                val = capacity.toString();
                                overriddenCapacity = true;
                            }
                            newHostResourceAttribs.put(key, val);
                        }
                        if (overriddenCapacity) {
                            CimString newHostResource = new CimString(oldHostResource.getValue(),
                                    newHostResourceAttribs);
                            Iterable<CimString> newHostResources = Iterables.concat(
                                    ImmutableList.of(newHostResource), Iterables.skip(oldHostResources, 1));
                            return input.toBuilder().hostResources(newHostResources).build();
                        } else {
                            logger.warn(
                                    "Unable to find capacity in Host Resource for disk %s in hardware section; cannot resize disk to %s",
                                    input, capacity);
                        }
                    } else {
                        logger.warn(
                                "Unable to find Host Resource for disk %s in hardware section; cannot resize disk to %s",
                                input, capacity);
                    }
                    return input;
                }

                @Override
                public String toString() {
                    return "disk = " + capacity;
                }
            });
}

From source file:org.apache.solr.analytics.accumulator.FacetingAccumulator.java

@Override
@SuppressWarnings("unchecked")
public NamedList<?> export() {
    final NamedList<Object> base = (NamedList<Object>) super.export();
    NamedList<NamedList<?>> facetList = new NamedList<>();

    // Add the field facet buckets to the output
    base.add("fieldFacets", facetList);
    for (FieldFacetRequest freq : request.getFieldFacets()) {
        final String name = freq.getName();
        if (hiddenFieldFacets.contains(name)) {
            continue;
        }
        final Map<String, Expression[]> buckets = fieldFacetExpressions.get(name);
        final NamedList<Object> bucketBase = new NamedList<>();

        Iterable<Entry<String, Expression[]>> iter = buckets.entrySet();

        final FieldFacetRequest fr = (FieldFacetRequest) freq;

        final FacetSortSpecification sort = fr.getSort();
        final int limit = fr.getLimit();
        final int offset = fr.getOffset();
        final boolean showMissing = fr.showsMissing();
        if (!showMissing) {
            buckets.remove(MISSING_VALUE);
        }
        // Sorting the buckets if a sort specification is provided
        if (sort != null && buckets.values().iterator().hasNext()) {
            int sortPlace = Arrays.binarySearch(expressionNames, sort.getStatistic());
            final Expression first = buckets.values().iterator().next()[sortPlace];
            final Comparator<Expression> comp = (Comparator<Expression>) first.comparator(sort.getDirection());

            final List<Entry<String, Expression[]>> sorted = new ArrayList<>(buckets.size());
            Iterables.addAll(sorted, iter);
            Collections.sort(sorted, new EntryComparator(comp, sortPlace));
            iter = sorted;
        }
        // apply the limit
        if (limit > AnalyticsContentHandler.DEFAULT_FACET_LIMIT) {
            if (offset > 0) {
                iter = Iterables.skip(iter, offset);
            }
            iter = Iterables.limit(iter, limit);
        }

        // Export each expression in the bucket.
        for (Entry<String, Expression[]> bucket : iter) {
            bucketBase.add(bucket.getKey(), export(bucket.getValue()));
        }

        facetList.add(name, bucketBase);
    }

    // Add the range facet buckets to the output
    facetList = new NamedList<>();
    base.add("rangeFacets", facetList);
    for (RangeFacetRequest freq : request.getRangeFacets()) {
        final String name = freq.getName();
        final Map<String, Expression[]> buckets = rangeFacetExpressions.get(name);
        final NamedList<Object> bucketBase = new NamedList<>();

        Iterable<Entry<String, Expression[]>> iter = buckets.entrySet();

        for (Entry<String, Expression[]> bucket : iter) {
            bucketBase.add(bucket.getKey(), export(bucket.getValue()));
        }

        facetList.add(name, bucketBase);
    }

    // Add the query facet buckets to the output
    facetList = new NamedList<>();
    base.add("queryFacets", facetList);
    for (QueryFacetRequest freq : request.getQueryFacets()) {
        final String name = freq.getName();
        final Map<String, Expression[]> buckets = queryFacetExpressions.get(name);
        final NamedList<Object> bucketBase = new NamedList<>();

        Iterable<Entry<String, Expression[]>> iter = buckets.entrySet();

        for (Entry<String, Expression[]> bucket : iter) {
            bucketBase.add(bucket.getKey(), export(bucket.getValue()));
        }

        facetList.add(name, bucketBase);
    }

    return base;
}

From source file:org.opennms.features.newts.converter.NewtsConverter.java

private ResourcePath buildResourcePath(final Path resourceDir) {
    final ResourcePath resourcePath;
    final Path relativeResourceDir = this.rrdDir.relativize(resourceDir);

    // Transform store-by-id path into store-by-foreign-source path
    if (relativeResourceDir.startsWith(Paths.get("snmp"))
            && !relativeResourceDir.startsWith(Paths.get("snmp", "fs"))) {

        // The part after snmp/ is considered the node ID
        final int nodeId = Integer.valueOf(relativeResourceDir.getName(1).toString());

        // Get the foreign source for the node
        final ForeignId foreignId = foreignIds.get(nodeId);
        if (foreignId == null) {
            return null;
        }

        // Make a store-by-foreign-source compatible path by using the found foreign ID and append the remaining path as-is
        resourcePath = ResourcePath.get(
                ResourcePath.get(ResourcePath.get("snmp", "fs"), foreignId.foreignSource, foreignId.foreignId),
                Iterables.transform(Iterables.skip(relativeResourceDir, 2), Path::toString));

    } else {
        resourcePath = ResourcePath.get(relativeResourceDir);
    }
    return resourcePath;
}

From source file:org.opendaylight.openflowplugin.applications.frsync.impl.strategy.SyncPlanPushStrategyIncrementalImpl.java

ListenableFuture<RpcResult<Void>> addMissingGroups(final NodeId nodeId,
        final InstanceIdentifier<FlowCapableNode> nodeIdent, final List<ItemSyncBox<Group>> groupsAddPlan,
        final SyncCrudCounters counters) {
    if (groupsAddPlan.isEmpty()) {
        LOG.trace("no groups configured for node: {} -> SKIPPING", nodeId.getValue());
        return RpcResultBuilder.<Void>success().buildFuture();
    }

    ListenableFuture<RpcResult<Void>> chainedResult;
    try {
        if (!groupsAddPlan.isEmpty()) {
            final CrudCounts groupCrudCounts = counters.getGroupCrudCounts();
            groupCrudCounts.setAdded(ReconcileUtil.countTotalPushed(groupsAddPlan));
            groupCrudCounts.setUpdated(ReconcileUtil.countTotalUpdated(groupsAddPlan));

            if (LOG.isDebugEnabled()) {
                LOG.debug("adding groups: planSteps={}, toAddTotal={}, toUpdateTotal={}", groupsAddPlan.size(),
                        groupCrudCounts.getAdded(), groupCrudCounts.getUpdated());
            }

            chainedResult = flushAddGroupPortionAndBarrier(nodeIdent, groupsAddPlan.get(0));
            for (final ItemSyncBox<Group> groupsPortion : Iterables.skip(groupsAddPlan, 1)) {
                chainedResult = Futures.transform(chainedResult,
                        new AsyncFunction<RpcResult<Void>, RpcResult<Void>>() {
                            @Override
                            public ListenableFuture<RpcResult<Void>> apply(final RpcResult<Void> input)
                                    throws Exception {
                                final ListenableFuture<RpcResult<Void>> result;
                                if (input.isSuccessful()) {
                                    result = flushAddGroupPortionAndBarrier(nodeIdent, groupsPortion);
                                } else {
                                    // pass through original unsuccessful rpcResult
                                    result = Futures.immediateFuture(input);
                                }

                                return result;
                            }
                        });
            }
        } else {
            chainedResult = RpcResultBuilder.<Void>success().buildFuture();
        }
    } catch (IllegalStateException e) {
        chainedResult = RpcResultBuilder.<Void>failed()
                .withError(RpcError.ErrorType.APPLICATION, "failed to add missing groups", e).buildFuture();
    }

    return chainedResult;
}

From source file:org.opendaylight.yangtools.yang.data.impl.leafref.LeafRefValidatation.java

private static Iterable<QNameWithPredicate> nextLevel(final Iterable<QNameWithPredicate> path) {
    return Iterables.skip(path, 1);
}

From source file:org.onos.yangtools.yang.model.util.SchemaContextUtil.java

/**
 * @throws IllegalArgumentException
 *
 * @param context
 *            Schema Context
 * @param module
 *            Yang Module
 * @param relativeXPath
 *            Non conditional Revision Aware Relative XPath
 * @param actualSchemaNode
 *            actual schema node
 * @return list of QName
 */
private static Iterable<QName> resolveRelativeXPath(final SchemaContext context, final Module module,
        final RevisionAwareXPath relativeXPath, final SchemaNode actualSchemaNode) {
    Preconditions.checkArgument(context != null, "Schema Context reference cannot be NULL");
    Preconditions.checkArgument(module != null, "Module reference cannot be NULL");
    Preconditions.checkArgument(relativeXPath != null, "Non Conditional Revision Aware XPath cannot be NULL");
    Preconditions.checkState(!relativeXPath.isAbsolute(),
            "Revision Aware XPath MUST be relative i.e. MUST contains ../, "
                    + "for non relative Revision Aware XPath use findDataSchemaNode method");
    Preconditions.checkState(actualSchemaNode.getPath() != null,
            "Schema Path reference for Leafref cannot be NULL");

    final Iterable<String> xpaths = SLASH_SPLITTER.split(relativeXPath.toString());

    // Find out how many "parent" components there are
    // FIXME: is .contains() the right check here?
    // FIXME: case ../../node1/node2/../node3/../node4
    int colCount = 0;
    for (final Iterator<String> it = xpaths.iterator(); it.hasNext() && it.next().contains("..");) {
        ++colCount;
    }

    final Iterable<QName> schemaNodePath = actualSchemaNode.getPath().getPathFromRoot();

    if (Iterables.size(schemaNodePath) - colCount >= 0) {
        return Iterables.concat(Iterables.limit(schemaNodePath, Iterables.size(schemaNodePath) - colCount),
                Iterables.transform(Iterables.skip(xpaths, colCount), new Function<String, QName>() {
                    @Override
                    public QName apply(final String input) {
                        return stringPathPartToQName(context, module, input);
                    }
                }));
    }
    return Iterables.concat(schemaNodePath,
            Iterables.transform(Iterables.skip(xpaths, colCount), new Function<String, QName>() {
                @Override
                public QName apply(final String input) {
                    return stringPathPartToQName(context, module, input);
                }
            }));
}