Example usage for com.google.common.collect Iterables limit

Introduction

This page collects real-world usage examples of com.google.common.collect.Iterables.limit.

Prototype

public static <T> Iterable<T> limit(final Iterable<T> iterable, final int limitSize) 

Document

Creates an iterable with the first limitSize elements of the given iterable. If the original iterable contains fewer than limitSize elements, the returned iterable contains all of them.
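
For orientation, here is a minimal, self-contained sketch of the method in isolation; the class and variable names (IterablesLimitDemo, names) are illustrative and not taken from the examples below:

import com.google.common.collect.Iterables;

import java.util.Arrays;
import java.util.List;

public class IterablesLimitDemo {
    public static void main(String[] args) {
        List<String> names = Arrays.asList("alice", "bob", "carol", "dave");

        // A lazy view of at most the first 2 elements; the source list is not copied.
        Iterable<String> firstTwo = Iterables.limit(names, 2);
        for (String name : firstTwo) {
            System.out.println(name); // prints "alice", then "bob"
        }

        // If limitSize exceeds the number of elements, the view simply yields all of them.
        System.out.println(Iterables.size(Iterables.limit(names, 10))); // prints 4
    }
}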

Usage

From source file:com.googlecode.blaisemath.util.coordinate.CoordinateManager.java

/** 
 * Call to ensure the cache does not exceed its maximum size. Should always
 * be called within a synchronized block.
 */
private void checkCache() {
    int n = inactive.size() - maxCacheSize;
    if (n > 0) {
        // Take the first n inactive entries (in iteration order) as eviction candidates.
        Set<S> remove = Sets.newHashSet(Iterables.limit(inactive, n));
        inactive.removeAll(remove);
        map.keySet().removeAll(remove);
    }
}

From source file:com.sun.tools.hat.internal.server.QueryHandler.java

private void printDetail(Model model, int size) {
    if (model != null) {
        model.visit(new ModelVisitor() {
            @Override
            public void visit(ScalarModel model) {
            }

            @Override
            public void visit(CollectionModel model) {
                out.print(" [");
                Collection<JavaThing> collection = model.getCollection();
                boolean first = true;
                // Render at most the first 10 elements; the remainder is summarized after the loop.
                for (JavaThing thing : Iterables.limit(collection, 10)) {
                    if (first) {
                        first = false;
                    } else {
                        out.print(", ");
                    }
                    printThing(thing, true);
                }
                if (collection.size() > 10) {
                    out.printf(", &hellip;%d more", collection.size() - 10);
                }
                out.print("]");
            }

            @Override
            public void visit(MapModel model) {
                out.print(" {");
                Map<JavaThing, JavaThing> map = model.getMap();
                boolean first = true;
                for (Map.Entry<JavaThing, JavaThing> entry : Iterables.limit(map.entrySet(), 10)) {
                    if (first) {
                        first = false;
                    } else {
                        out.print(", ");
                    }
                    printThing(entry.getKey(), true);
                    out.print(" &rArr; ");
                    printThing(entry.getValue(), true);
                }
                if (map.size() > 10) {
                    out.printf(", &hellip;%d more", map.size() - 10);
                }
                out.print("}");
            }

            @Override
            public void visit(ObjectModel model) {
                out.print(" {");
                Map<String, JavaThing> map = model.getProperties();
                boolean first = true;
                for (Map.Entry<String, JavaThing> entry : map.entrySet()) {
                    if (first) {
                        first = false;
                    } else {
                        out.print(", ");
                    }
                    out.print(entry.getKey());
                    out.print(": ");
                    printThing(entry.getValue(), true);
                }
                out.print("}");
            }
        });
    } else {
        out.print(" (" + size + " bytes)");
    }
}

From source file:org.diqube.execution.steps.ResolveColumnDictIdsStep.java

@Override
public void execute() {
    boolean intermediateRun = !(colBuiltConsumer.getNumberOfTimesWired() == 0 || sourceColumnIsBuilt.get());

    if (colBuiltConsumer.getNumberOfTimesWired() > 0 && colBuiltConsumerIsDone.get()
            && !sourceColumnIsBuilt.get()) {
        logger.debug("Waited for column {} to be built, but it won't be built. Skipping.", colName);
        forEachOutputConsumerOfType(GenericConsumer.class, c -> c.sourceIsDone());
        doneProcessing();
        return;
    }

    NavigableSet<Long> curAdjustedRowIds;
    synchronized (newestSync) {
        // Fetch rowIds whose values have been adjusted. Note that this is not 100% thread-safe when intermediateRun ==
        // true: in that case we resolve the corresponding ExecutionEnvironment to be used later in another sync block,
        // and in between a new env might have arrived with new adjustedRowIds. As the set of reported rowIds only
        // grows, though, it is no problem to execute on a set of adjustedRows against a newer env; we will simply
        // resolve the other reported rowIds one execution later.
        curAdjustedRowIds = newestAdjustedRowIds;
        newestAdjustedRowIds = new TreeSet<>();
    }

    ExecutionEnvironment env;
    if (!intermediateRun)
        env = defaultEnv;
    else {
        synchronized (newestSync) {
            env = newestTemporaryEnv;
            if (env == null || env.getColumnShard(colName) == null) {
                // re-remember those IDs we removed from the set already.
                newestAdjustedRowIds.addAll(curAdjustedRowIds);
                return;
            }
        }
    }

    // Fetch row IDs whose column dict ID should be resolved.
    NavigableSet<Long> activeRowIds = new TreeSet<>();
    Long rowId;
    while ((rowId = rowIds.poll()) != null)
        activeRowIds.add(rowId);

    if (intermediateRun) {
        // restrict active row IDs to only contain available rows and include & publish notYetProcessedRowIds.
        long maxAvailableRowId = new ColumnVersionBuiltHelper().publishActiveRowIds(env, Arrays.asList(colName),
                activeRowIds, notYetProcessedRowIds);

        if (maxAvailableRowId == -1L) {
            // Our column is not built. Should not happen, but just to be sure...
            logger.warn(
                    "ColumnVersionBuiltHelper told us that our column is not built. This should not happen.");
            return;
        }

        // Shrink the set of adjusted rows to the row IDs that are available. If other rowIds have changed their
        // value, that is not interesting to us, because we did not yet resolve their values anyway.
        curAdjustedRowIds = curAdjustedRowIds.headSet(maxAvailableRowId, true);
    } else {
        activeRowIds.addAll(notYetProcessedRowIds);
        notYetProcessedRowIds.clear();
    }

    // Be sure to re-resolve those row IDs that we resolved already but whose value has changed.
    activeRowIds.addAll(Sets.intersection(curAdjustedRowIds, processedRowIds));

    if (activeRowIds.size() > 0) {
        // Iterables.limit bounds the trace output to at most 500 row IDs.
        logger.trace("Resolving column dict IDs of col {} based on ExecutionEnv {} at row IDs (limit, {}) {}",
                colName, env, activeRowIds.size(), Iterables.limit(activeRowIds, 500));

        if (env.getPureConstantColumnShard(colName) != null) {
            long columnValueId = env.getPureConstantColumnShard(colName).getSingleColumnDictId();

            Map<Long, Long> rowIdToDictIdMap = new HashMap<>();
            for (Long curRowId : activeRowIds)
                rowIdToDictIdMap.put(curRowId, columnValueId);
            logger.trace(
                    "Resolving column dict IDs of col {} done, was easy as it was a constant col, sending out updates",
                    colName);
            forEachOutputConsumerOfType(ColumnDictIdConsumer.class,
                    c -> c.consume(env, colName, rowIdToDictIdMap));
        } else {
            Map<Long, Long> rowIdToColumnValueId = env.getColumnShard(colName)
                    .resolveColumnValueIdsForRows(activeRowIds);

            logger.trace("Resolving column dict IDs of col {} done, sending out updates (limit): {}", colName,
                    Iterables.limit(rowIdToColumnValueId.entrySet(), 100));
            forEachOutputConsumerOfType(ColumnDictIdConsumer.class,
                    c -> c.consume(env, colName, rowIdToColumnValueId));
        }

        processedRowIds.addAll(activeRowIds);
    }

    if (!intermediateRun && rowIdSourceIsEmpty.get() && rowIds.isEmpty() && newestAdjustedRowIds.isEmpty()) {
        forEachOutputConsumerOfType(GenericConsumer.class, c -> c.sourceIsDone());
        doneProcessing();
    }
}

From source file:com.eucalyptus.util.async.Futures.java

/**
 * TODO:GUAVA: remove and use the method available in Guava 10
 */
public static <R> CheckedListenableFuture<List<R>> allAsList(final List<CheckedListenableFuture<R>> futures) {
    final GenericCheckedListenableFuture<List<R>> combined = new GenericCheckedListenableFuture<List<R>>();
    final List<R> resultList = Lists.newArrayListWithCapacity(futures.size());
    // Pre-fill the result list with one null per future so that results can later be set by index.
    Iterables.addAll(resultList, Iterables.limit(Iterables.cycle((R) null), futures.size()));
    final AtomicInteger completionCountdown = new AtomicInteger(futures.size());
    for (int i = 0; i < futures.size(); i++) {
        final int resultIndex = i;
        final CheckedListenableFuture<R> future = futures.get(i);
        future.addListener(new Runnable() {
            @Override
            public void run() {
                try {
                    resultList.set(resultIndex, future.get());
                } catch (final ExecutionException e) {
                    combined.setException(e.getCause());
                } catch (CancellationException e) {
                    combined.cancel(false);
                } catch (InterruptedException e) {
                    // The future is already complete, so this cannot happen.
                }
                if (completionCountdown.decrementAndGet() == 0) {
                    combined.set(resultList);
                }
            }
        });
    }

    return combined;
}
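
As an aside, the cycle/limit idiom above is a handy way to pre-fill an index-settable list of a fixed size. A minimal sketch of the same idea; the names (PrefillSketch, slots) are illustrative:

import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;

import java.util.List;

public class PrefillSketch {
    public static void main(String[] args) {
        int size = 4;
        List<String> slots = Lists.newArrayListWithCapacity(size);
        // cycle((String) null) is an endless iterable of nulls; limit truncates it to 'size' elements.
        Iterables.addAll(slots, Iterables.limit(Iterables.cycle((String) null), size));
        slots.set(2, "done"); // safe: indices 0..size-1 all exist
        System.out.println(slots); // [null, null, done, null]
    }
}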

From source file:eus.ixa.ixa.pipe.nerc.dict.MFSResource.java

/**
 * Get a rank of senses ordered by MFS. 
 * @param lemmaPOSClass the lemma#pos entry
 * @param rankSize the size of the rank
 * @return the ordered multimap containing the rank
 */
public TreeMultimap<Integer, String> getMFSRanking(String lemmaPOSClass, Integer rankSize) {

    TreeMultimap<Integer, String> mfsResultsMap = getOrderedMap(lemmaPOSClass);
    TreeMultimap<Integer, String> mfsRankMap = TreeMultimap.create(Ordering.natural().reverse(),
            Ordering.natural());
    for (Map.Entry<Integer, String> freqSenseEntry : Iterables.limit(mfsResultsMap.entries(), rankSize)) {
        mfsRankMap.put(freqSenseEntry.getKey(), freqSenseEntry.getValue());
    }
    return mfsRankMap;
}

From source file:com.google.gerrit.server.ReviewersUtil.java

private List<GroupReference> suggestAccountGroups(SuggestReviewers suggestReviewers, ProjectControl ctl) {
    return Lists.newArrayList(Iterables.limit(groupBackend.suggest(suggestReviewers.getQuery(), ctl),
            suggestReviewers.getLimit()));
}

From source file:com.github.benmanes.caffeine.cache.NodeFactoryGenerator.java

private void addNodeSpec(String className, boolean isFinal, Set<Feature> features) {
    TypeName superClass;
    Set<Feature> parentFeatures;
    Set<Feature> generateFeatures;
    if (features.size() == 2) {
        parentFeatures = ImmutableSet.of();
        generateFeatures = features;
        superClass = TypeName.OBJECT;
    } else {
        // The parent type covers all but the last feature; only the last feature is generated in this subtype.
        parentFeatures = ImmutableSet.copyOf(Iterables.limit(features, features.size() - 1));
        generateFeatures = ImmutableSet.of(Iterables.getLast(features));
        superClass = ParameterizedTypeName.get(
                ClassName.get(PACKAGE_NAME + ".NodeFactory", encode(Feature.makeClassName(parentFeatures))),
                kTypeVar, vTypeVar);
    }

    NodeContext context = new NodeContext(superClass, className, isFinal, parentFeatures, generateFeatures);
    for (NodeRule rule : rules) {
        rule.accept(context);
    }
    nodeFactory.addType(context.nodeSubtype.build());
    addEnumConstant(className, features);
}

From source file:org.diqube.cluster.ClusterManager.java

@Override
public void localServerStartedServing() {

    if (clusterNodesConfigString == null || "".equals(clusterNodesConfigString)) {
        logger.info("There are no cluster nodes configured, will therefore not connect anywhere.");
        if (clusterManagerListeners != null)
            clusterManagerListeners.forEach(l -> l.clusterInitialized());
        return;
    }
    List<NodeAddress> initialClusterNodes = parseClusterNodes(this.clusterNodesConfigString);
    if (initialClusterNodes == null) {
        logger.warn("There are no cluster nodes configured, will therefore not connect anywhere.");
        if (clusterManagerListeners != null)
            clusterManagerListeners.forEach(l -> l.clusterInitialized());
        return;
    }

    logger.debug("Starting to communicate to cluster using the configured hosts ({})...", initialClusterNodes);

    try {
        // Use the first node we can contact to fetch a list of all cluster nodes it knows. That list will later be
        // used to start up the consensus node.
        Set<RNodeAddress> allClusterNodes = new HashSet<>();
        for (NodeAddress nodeAddr : initialClusterNodes) {
            try (Connection<ClusterManagementService.Iface> conn = reserveConnection(nodeAddr)) {
                allClusterNodes.addAll(conn.getService().getAllKnownClusterNodes());
            } catch (ConnectionException | TException | IOException e) {
                logger.warn("Could not contact cluster node at {}.", nodeAddr, e);
            }
        }

        if (allClusterNodes.isEmpty()) {
            logger.warn("There are no cluster nodes alive, will therefore not connect anywhere.");
            if (clusterManagerListeners != null)
                clusterManagerListeners.forEach(l -> l.clusterInitialized());
            return;
        }

        allClusterNodes.forEach(remoteAddr -> consensusClusterNodes.add(new NodeAddress(remoteAddr)));
    } catch (InterruptedException e) {
        logger.error("Interrupted while starting to communicate with cluster", e);
        return;
    }

    // Iterables.limit bounds the logged list to at most 100 addresses.
    logger.info("Gathered {} node addresses of the cluster (limit): {}", consensusClusterNodes.size(),
            Iterables.limit(consensusClusterNodes, 100));

    // enable activity when dead or alive nodes are identified.
    clusterNodeStatusDetailListenerDisabled = false;

    if (clusterManagerListeners != null)
        clusterManagerListeners.forEach(l -> l.clusterInitialized());
}

From source file:org.diqube.execution.steps.GroupFinalAggregationStep.java

private ColumnShard createNewColumn() throws FunctionException {
    SparseColumnShardBuilder<Object> columnBuildManager = columnShardBuilderFactory
            .createSparseColumnShardBuilder(outputColName);

    Map<Long, Object> rowIdToValue = new HashMap<>();
    long maxRowId = -1;
    for (Long rowId : aggregationFunctions.keySet()) {
        rowIdToValue.put(rowId, aggregationFunctions.get(rowId).calculate());
        if (rowId > maxRowId)
            maxRowId = rowId;
    }

    logger.trace("Values of new col (limit): {}", Iterables.limit(rowIdToValue.entrySet(), 100));
    columnBuildManager.withNumberOfRows(maxRowId + 1).withValues(rowIdToValue);

    ColumnShard columnShard = columnBuildManager.build();
    return columnShard;
}

From source file:org.apache.brooklyn.demo.CumulusRDFApplication.java

/**
 * Controls the startup locations for the webapp and the cassandra fabric.
 *
 * @see AbstractApplication#start(Collection)
 */
@Override
public void start(Collection<? extends Location> locations) {
    addLocations(locations);

    // The web application only needs to run in one location; use the first.
    // TODO use a multi-region web cluster
    Collection<? extends Location> first = MutableList.copyOf(Iterables.limit(locations, 1));

    ServiceStateLogic.setExpectedState(this, Lifecycle.STARTING);
    try {
        Entities.invokeEffector(this, cassandra, Startable.START, MutableMap.of("locations", locations))
                .getUnchecked();
        Entities.invokeEffector(this, webapp, Startable.START, MutableMap.of("locations", first))
                .getUnchecked();
    } catch (Exception e) {
        throw Exceptions.propagate(e);
    } finally {
        ServiceStateLogic.setExpectedState(this, Lifecycle.RUNNING);
    }
    log.info("Started CumulusRDF in " + locations);
}