Example usage for com.google.common.collect Iterables limit

Introduction

This page collects usage examples for com.google.common.collect.Iterables.limit.

Prototype

public static <T> Iterable<T> limit(final Iterable<T> iterable, final int limitSize) 

Document

Creates an iterable that is a view of the first limitSize elements of the given iterable; if the source contains fewer than limitSize elements, the view contains them all.
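
A minimal, runnable sketch of that behavior (the class and variable names below are illustrative, not taken from the usage examples that follow): the returned Iterable is a view over the source, so nothing is copied up front, and a limitSize larger than the source is harmless.

import com.google.common.collect.Iterables;

import java.util.Arrays;
import java.util.List;

public class IterablesLimitExample {
    public static void main(String[] args) {
        List<String> names = Arrays.asList("alpha", "beta", "gamma", "delta");

        // A view of at most the first two elements; the source list is not copied.
        for (String name : Iterables.limit(names, 2)) {
            System.out.println(name); // prints "alpha", then "beta"
        }

        // A limit larger than the source is safe: the view simply ends early.
        System.out.println(Iterables.toString(Iterables.limit(names, 10)));
        // prints "[alpha, beta, gamma, delta]"
    }
}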

Usage

From source file:cosmos.mapred.MediawikiQueries.java

public void run(int numIterations) throws Exception {
    final Random offsetR = new Random(), cardinalityR = new Random();

    int iters = 0;

    while (iters < numIterations) {
        Store id = Store.create(this.con,
                this.con.securityOperations().getUserAuthorizations(this.con.whoami()),
                IdentitySet.<Index>create());

        int offset = offsetR.nextInt(MAX_OFFSET);
        int numRecords = cardinalityR.nextInt(MAX_SIZE) + 1;

        BatchScanner bs = this.con.createBatchScanner("sortswiki", new Authorizations(), 4);

        bs.setRanges(Collections.singleton(new Range(Integer.toString(offset), Integer.toString(MAX_ROW))));

        Iterable<Entry<Key, Value>> inputIterable = Iterables.limit(bs, numRecords);

        this.sorts.register(id);

        System.out.println(Thread.currentThread().getName() + ": " + id.uuid() + " - Iteration " + iters);
        long recordsReturned = 0L;
        Function<Entry<Key, Value>, MultimapRecord> func = new Function<Entry<Key, Value>, MultimapRecord>() {
            @Override
            public MultimapRecord apply(Entry<Key, Value> input) {
                Page p;
                try {
                    p = Page.parseFrom(input.getValue().get());
                } catch (InvalidProtocolBufferException e) {
                    throw new RuntimeException(e);
                }
                return pagesToQueryResult(p);
            }
        };

        Map<Column, Long> counts = Maps.newHashMap();
        ArrayList<MultimapRecord> tformSource = Lists.newArrayListWithCapacity(20000);

        Stopwatch sw = new Stopwatch();
        Stopwatch tformSw = new Stopwatch();

        for (Entry<Key, Value> input : inputIterable) {
            tformSw.start();

            MultimapRecord r = func.apply(input);
            tformSource.add(r);

            tformSw.stop();

            loadCountsForRecord(counts, r);
            recordsReturned++;
        }

        sw.start();
        this.sorts.addResults(id, tformSource);
        sw.stop();

        long actualNumResults = tformSource.size();

        System.out.println(Thread.currentThread().getName() + ": Took " + tformSw + " transforming and " + sw
                + " to store " + recordsReturned + " records");
        logTiming(actualNumResults, tformSw.elapsed(TimeUnit.MILLISECONDS), "transformInput");
        logTiming(actualNumResults, sw.elapsed(TimeUnit.MILLISECONDS), "ingest");

        bs.close();

        Random r = new Random();
        int max = r.nextInt(10) + 1;

        // Run a bunch of queries
        for (int count = 0; count < max; count++) {
            long resultCount;
            String name;
            int i = r.nextInt(9);

            if (0 == i) {
                resultCount = docIdFetch(id, counts, actualNumResults);
                name = "docIdFetch";
            } else if (1 == i) {
                resultCount = columnFetch(id, REVISION_ID, counts, actualNumResults);
                name = "revisionIdFetch";
            } else if (2 == i) {
                resultCount = columnFetch(id, PAGE_ID, counts, actualNumResults);
                name = "pageIdFetch";
            } else if (3 == i) {
                groupBy(id, REVISION_ID, counts, actualNumResults);
                // no point verifying here
                resultCount = recordsReturned;
                name = "groupByRevisionId";
            } else if (4 == i) {
                groupBy(id, PAGE_ID, counts, actualNumResults);
                // no point verifying here
                resultCount = recordsReturned;
                name = "groupByPageId";
            } else if (5 == i) {
                resultCount = columnFetch(id, CONTRIBUTOR_USERNAME, counts, actualNumResults);
                name = "contributorUsernameFetch";
            } else if (6 == i) {
                groupBy(id, CONTRIBUTOR_USERNAME, counts, actualNumResults);
                // no point verifying here
                resultCount = recordsReturned;
                name = "groupByContributorUsername";
            } else if (7 == i) {
                resultCount = columnFetch(id, CONTRIBUTOR_ID, counts, actualNumResults);
                name = "contributorIdFetch";
            } else { // 8 == i
                groupBy(id, CONTRIBUTOR_ID, counts, actualNumResults);
                // no point verifying here
                resultCount = recordsReturned;
                name = "groupByContributorID";
            }
        }
        System.out.println(Thread.currentThread().getName() + ": deleting " + id);
        // Delete the results
        sw = new Stopwatch();

        sw.start();

        this.sorts.delete(id);
        sw.stop();

        System.out.println(Thread.currentThread().getName() + ": Took " + sw.toString() + " to delete results");
        logTiming(actualNumResults, sw.elapsed(TimeUnit.MILLISECONDS), "deleteResults");

        iters++;
    }

    this.sorts.close();
}

From source file:com.yahoo.pulsar.broker.service.persistent.PersistentDispatcherMultipleConsumers.java

private void readMoreEntries() {
    if (totalAvailablePermits > 0 && isAtleastOneConsumerAvailable()) {
        int messagesToRead = Math.min(totalAvailablePermits, readBatchSize);

        if (!messagesToReplay.isEmpty()) {
            if (havePendingReplayRead) {
                log.debug("[{}] Skipping replay while awaiting previous read to complete", name);
                return;
            }

            Set<PositionImpl> messagesToReplayNow = ImmutableSet
                    .copyOf(Iterables.limit(messagesToReplay, messagesToRead));

            if (log.isDebugEnabled()) {
                log.debug("[{}] Schedule replay of {} messages for {} consumers", name,
                        messagesToReplayNow.size(), consumerList.size());
            }

            havePendingReplayRead = true;
            Set<? extends Position> deletedMessages = cursor.asyncReplayEntries(messagesToReplayNow, this,
                    ReadType.Replay);
            // clear already acked positions from replay bucket
            messagesToReplay.removeAll(deletedMessages);
            // if all the entries are acked-entries and cleared up from messagesToReplay, try to read
            // next entries as readCompletedEntries-callback was never called 
            if ((messagesToReplayNow.size() - deletedMessages.size()) == 0) {
                havePendingReplayRead = false;
                readMoreEntries();
            }
        } else if (!havePendingRead) {
            if (log.isDebugEnabled()) {
                log.debug("[{}] Schedule read of {} messages for {} consumers", name, messagesToRead,
                        consumerList.size());
            }
            havePendingRead = true;
            cursor.asyncReadEntriesOrWait(messagesToRead, this, ReadType.Normal);
        } else {
            log.debug("[{}] Cannot schedule next read until previous one is done", name);
        }
    } else {
        if (log.isDebugEnabled()) {
            log.debug("[{}] Consumer buffer is full, pause reading", name);
        }
    }
}

From source file:brooklyn.entity.rebind.Dumpers.java

private static void deepDumpInternal(Object o, Predicate<Field> fieldPredicate, PrintStream out, int indentSize,
        String prefix, List<Object> visited) throws IllegalArgumentException, IllegalAccessException {
    String indent = com.google.common.base.Strings.repeat(" ", indentSize * 2);
    Class<?> clazz = (o != null) ? o.getClass() : null;

    if (o == null) {
        out.println(indent + prefix + "null");
    } else if (isClassUntraversable(clazz)) {
        out.println(indent + prefix + "(untraversable) type=" + clazz + "; val=" + o.toString());
    } else if (containsSame(visited, o)) {
        out.println(indent + prefix + "duplicate (type=" + clazz + "; val=" + o.toString() + ")");
    } else {
        visited.add(o);
        out.println(indent + prefix + "type=" + clazz + "; val=" + o.toString());
        Map<String, Object> members = findMembers(o, fieldPredicate);
        for (Map.Entry<String, Object> entry : Iterables.limit(members.entrySet(), MAX_MEMBERS)) {
            deepDumpInternal(entry.getValue(), fieldPredicate, out, indentSize + 1, "" + entry.getKey() + ": ",
                    visited);
        }
        if (members.size() > MAX_MEMBERS) {
            out.println(indent + prefix + "TRUNCATED (" + members.size() + " members in total)");
        }
    }
}

From source file:org.apache.jackrabbit.oak.plugins.index.lucene.directory.IndexRootDirectory.java

/**
 * <ul>
 *     <li>abc -> abc</li>
 *     <li>xy:abc -> xyabc</li>
 *     <li>/oak:index/abc -> abc</li>
 * </ul>
 *
 * The resulting file name is truncated to MAX_NAME_LENGTH.
 */
static String getIndexFolderBaseName(String indexPath) {
    List<String> elements = Lists.newArrayList(PathUtils.elements(indexPath));
    Collections.reverse(elements);
    List<String> result = Lists.newArrayListWithCapacity(2);

    //Max 3 nodeNames including oak:index which is the immediate parent for any indexPath
    for (String e : Iterables.limit(elements, 3)) {
        if ("oak:index".equals(e)) {
            continue;
        }
        // Strip off any char outside of a-zA-Z0-9_
        result.add(e.replaceAll("\\W", ""));
    }

    Collections.reverse(result);
    String name = Joiner.on('_').join(result);
    if (name.length() > MAX_NAME_LENGTH) {
        name = name.substring(0, MAX_NAME_LENGTH);
    }
    return name;
}

From source file:org.onos.yangtools.yang.data.impl.schema.tree.InMemoryDataTreeModification.java

private OperationWithModification resolveModificationFor(final YangInstanceIdentifier path) {
    upgradeIfPossible();

    /*
     * Walk the strategy and modification trees in-sync, creating modification nodes as needed.
     *
     * If the user has provided wrong input, we may end up with a bunch of TOUCH nodes present
     * ending with an empty one, as we will throw the exception below. This fact could end up
     * being a problem, as we'd have a bunch of phantom operations.
     *
     * That is fine, as we will prune any empty TOUCH nodes in the last phase of the ready
     * process.
     */
    ModificationApplyOperation operation = strategyTree;
    ModifiedNode modification = rootNode;

    int i = 1;
    for (final PathArgument pathArg : path.getPathArguments()) {
        final Optional<ModificationApplyOperation> potential = operation.getChild(pathArg);
        if (!potential.isPresent()) {
            throw new SchemaValidationFailedException(String.format("Child %s is not present in schema tree.",
                    Iterables.toString(Iterables.limit(path.getPathArguments(), i))));
        }
        operation = potential.get();
        ++i;

        modification = modification.modifyChild(pathArg, operation.getChildPolicy());
    }

    return OperationWithModification.from(operation, modification);
}

From source file:com.eucalyptus.reporting.Counter.java

private CountPeriod<C> period(final long time) {
    Optional<CountPeriod<C>> period = Optional.empty();
    while (!period.isPresent()) {
        period = periods.get().stream().filter(p -> p.test(time)).findFirst();
        if (!period.isPresent()) {
            final List<CountPeriod<C>> periodList = periods.get();
            final List<CountPeriod<C>> newPeriodList = Lists.newArrayList();
            newPeriodList.add(newPeriod(time));
            while (newPeriodList.size() < periodCount && !periodList.isEmpty()) {
                if (newPeriodList.get(newPeriodList.size() - 1).key.start != periodList.get(0).key.end) {
                    newPeriodList.add(
                            newPeriod((newPeriodList.get(newPeriodList.size() - 1).key.start - periodLength)));
                } else {
                    break;
                }
            }
            if (newPeriodList.size() < periodCount) {
                Iterables.addAll(newPeriodList,
                        Iterables.limit(periodList, periodCount - newPeriodList.size()));
            }
            periods.compareAndSet(periodList, ImmutableList.copyOf(newPeriodList));
        }
    }
    return period.get();
}

From source file:org.diqube.execution.steps.BuildColumnFromValuesStep.java

@Override
protected void execute() {
    // this is the last run of this execute method if the input source is fully done.
    boolean intermediateRun = !sourceIsDone.get();

    if (intermediateRun && !existsOutputConsumerOfType(ColumnVersionBuiltConsumer.class))
        // if this is NOT the last run (= there are more values to be provided), but there is no-one who'd listen to
        // intermediary updates, do not calculate them.
        return;

    if (intermediateRun && !atLeastOneInterestingUpdate.get())
        return;

    Map<Long, Object> values;
    Set<Long> curUpdatedRowIds;
    synchronized (columnSync) {
        atLeastOneInterestingUpdate.set(false);

        if (columnValues == null || columnValues.isEmpty()) {
            if (!intermediateRun) {
                // source is done but we did not receive any data. Do not build column, just report "done".
                forEachOutputConsumerOfType(GenericConsumer.class, c -> c.sourceIsDone());
                doneProcessing();
                return;
            }
            return;
        }

        values = new HashMap<Long, Object>(columnValues);

        curUpdatedRowIds = updatedRowIds;
        updatedRowIds = new HashSet<>();
    }
    long numberOfRows = values.keySet().stream().max(Long::compare).get() + 1;

    SparseColumnShardBuilder<Object> columnShardBuilder = columnShardBuilderFactory
            .createSparseColumnShardBuilder(colName);

    columnShardBuilder.withValues(values);
    columnShardBuilder.withNumberOfRows(numberOfRows);
    ColumnShard newColumn = columnShardBuilder.build();

    // inform ColumnVersionBuiltConsumers
    if (existsOutputConsumerOfType(ColumnVersionBuiltConsumer.class)) {
        logger.trace("Building new column version for {} after adjusting rows (limt) {}", colName,
                Iterables.limit(curUpdatedRowIds, 500));
        VersionedExecutionEnvironment newEnv = columnVersionManager.createNewVersion(newColumn);
        forEachOutputConsumerOfType(ColumnVersionBuiltConsumer.class,
                c -> c.columnVersionBuilt(newEnv, colName, curUpdatedRowIds));
    }

    // if done, inform other consumers.
    if (!intermediateRun) {
        switch (newColumn.getColumnType()) {
        case STRING:
            defaultEnv.storeTemporaryStringColumnShard((StringColumnShard) newColumn);
            break;
        case LONG:
            defaultEnv.storeTemporaryLongColumnShard((LongColumnShard) newColumn);
            break;
        case DOUBLE:
            defaultEnv.storeTemporaryDoubleColumnShard((DoubleColumnShard) newColumn);
            break;
        }

        logger.trace("Built column {} from values received from a ColumnValueConsumer.", colName);
        forEachOutputConsumerOfType(ColumnBuiltConsumer.class, c -> c.columnBuilt(colName));
        forEachOutputConsumerOfType(GenericConsumer.class, c -> c.sourceIsDone());
        doneProcessing();
    }
}

From source file:org.diqube.execution.steps.FilterRequestedColumnsAndActiveRowIdsStep.java

/**
 * Filters all values of all columns that have one of the specified rowIds and informs {@link ColumnValueConsumer}s
 * about them.
 */
private void processValues(Map<String, Map<Long, Object>> values, Set<Long> rowIds) {
    for (Entry<String, Map<Long, Object>> valueEntry : values.entrySet()) {
        Set<Long> activeValueRowIds = Sets.intersection(valueEntry.getValue().keySet(), rowIds);
        if (!activeValueRowIds.isEmpty()) {
            Map<Long, Object> newValues = Maps.filterKeys(valueEntry.getValue(),
                    rowId -> activeValueRowIds.contains(rowId));

            logger.trace("Sending out values for {}, rowIds (limit) {}", valueEntry.getKey(),
                    Iterables.limit(activeValueRowIds, 100));

            forEachOutputConsumerOfType(ColumnValueConsumer.class,
                    c -> c.consume(valueEntry.getKey(), newValues));
        }
    }
}

From source file:org.diqube.execution.steps.GroupStep.java

@Override
protected void execute() {
    if (columnBuiltConsumer.getNumberOfTimesWired() > 0 && !allColumnsBuilt.get())
        // we wait until our columns are all built.
        return;

    if (headGrouper == null)
        // create groupers only now, when we know that all columns are really available.
        headGrouper = createGroupers(colNamesToGroupBy, 0).get();

    List<Long> activeRowIds = new ArrayList<>();
    Long newRowId;
    while ((newRowId = rowIds.poll()) != null)
        activeRowIds.add(newRowId);

    if (activeRowIds.size() > 0) {
        // use headGrouper to group the new RowIDs, collect the new groupings in a new map.
        Map<Long, List<Long>> changesGroups = new HashMap<>();
        headGrouper.groupRowIds(activeRowIds, changesGroups);

        logger.trace("Grouped new rowIds (limit each): {}",
                Maps.transformValues(changesGroups, lst -> Iterables.limit(lst, 50)));

        Set<Long> newGroupIds = Sets.difference(changesGroups.keySet(), groups.keySet());

        if (!newGroupIds.isEmpty()) {
            // If we started new groups, we need to resolve the values of the group-by fields (if they are selected, e.g.).
            // As each groupID is in fact a rowID (of one arbitrary row that is inside the group), we find those new row IDs
            // and send them to RowID consumers.
            Long[] newRowIdsArray = newGroupIds.stream().toArray(l -> new Long[l]);
            logger.trace("New group IDs (limit): {}", Iterables.limit(Arrays.asList(newRowIdsArray), 100));

            forEachOutputConsumerOfType(RowIdConsumer.class, c -> c.consume(newRowIdsArray));
        }

        for (Long groupId : changesGroups.keySet()) {
            if (!groups.containsKey(groupId))
                groups.put(groupId, new ArrayList<>(changesGroups.get(groupId)));
            else
                groups.get(groupId).addAll(changesGroups.get(groupId));
        }

        forEachOutputConsumerOfType(GroupDeltaConsumer.class, c -> c.consumeGroupDeltas(changesGroups));
        forEachOutputConsumerOfType(GroupConsumer.class, c -> c.consumeGroups(groups));
    }
    if (sourceIsEmpty.get() && rowIds.isEmpty()) {
        forEachOutputConsumerOfType(GenericConsumer.class, c -> c.sourceIsDone());
        doneProcessing();
    }
}

From source file:gaffer.store.schema.ViewValidator.java

protected boolean validateGroupBy(final boolean isStoreOrdered, final String group,
        final ViewElementDefinition viewElDef, final SchemaElementDefinition schemaElDef) {
    final LinkedHashSet<String> viewGroupBy = viewElDef.getGroupBy();

    boolean isValid = true;
    if (null != viewGroupBy && !viewGroupBy.isEmpty()) {
        final LinkedHashSet<String> schemaGroupBy = schemaElDef.getGroupBy();
        if (null != schemaGroupBy && schemaGroupBy.containsAll(viewGroupBy)) {
            if (isStoreOrdered) {
                final LinkedHashSet<String> schemaGroupBySubset = Sets
                        .newLinkedHashSet(Iterables.limit(schemaGroupBy, viewGroupBy.size()));
                if (!viewGroupBy.equals(schemaGroupBySubset)) {
                    LOGGER.error("Group by properties for group " + group
                            + " are not in the same order as the group by properties in the schema. View groupBy:"
                            + viewGroupBy + ". Schema groupBy:" + schemaGroupBy);
                    isValid = false;
                }
            }
        } else {
            LOGGER.error("Group by properties for group " + group
                    + " in the view are not all included in the group by field in the schema. View groupBy:"
                    + viewGroupBy + ". Schema groupBy:" + schemaGroupBy);
            isValid = false;
        }
    }

    return isValid;
}