Example usage for com.google.common.collect Iterables partition

Introduction

This page collects real-world usage examples of com.google.common.collect.Iterables.partition.

Prototype

public static <T> Iterable<List<T>> partition(final Iterable<T> iterable, final int size) 

Document

Divides an iterable into unmodifiable sublists of the given size (the final iterable may be smaller).
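
For reference, here is a minimal self-contained sketch of the method's behavior before the real-world examples below (the class name and values are illustrative, not taken from any of the projects):

import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;

import java.util.List;

public class PartitionDemo {
    public static void main(String[] args) {
        List<Integer> numbers = ImmutableList.of(1, 2, 3, 4, 5, 6, 7);
        // Chunks of 3: the trailing chunk holds whatever remains.
        for (List<Integer> chunk : Iterables.partition(numbers, 3)) {
            System.out.println(chunk); // [1, 2, 3] then [4, 5, 6] then [7]
        }
    }
}

The returned sublists are unmodifiable views; attempting to mutate a chunk throws UnsupportedOperationException.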

Usage

From source file:google.registry.testing.DatastoreHelper.java
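
Persists test entities in chunks of ten per transaction to stay under Datastore's entity-group limit, then clears the session cache and reloads each entity so later reads come from the datastore rather than a cache.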

private static <R> void persistResources(final Iterable<R> resources, final boolean wantBackup) {
    for (R resource : resources) {
        assertWithMessage("Attempting to persist a Builder is almost certainly an error in test code")
                .that(resource).isNotInstanceOf(Buildable.Builder.class);
    }
    // Persist domains ten at a time, to avoid exceeding the entity group limit.
    for (final List<R> chunk : Iterables.partition(resources, 10)) {
        ofy().transact(new VoidWork() {
            @Override
            public void vrun() {
                for (R resource : chunk) {
                    saveResource(resource, wantBackup);
                }
            }
        });
    }
    // Force the session to be cleared so that when we read it back, we read from the datastore
    // and not from the transaction cache or memcache.
    ofy().clearSessionCache();
    for (R resource : resources) {
        ofy().load().entity(resource).now();
    }
}

From source file:com.eucalyptus.cluster.callback.reporting.CloudWatchHelper.java
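
Groups metric datums by user and namespace, then uses partition to split each group into PutMetricDataType requests of at most MAX_PUT_METRIC_DATA_ITEMS (20) datums each.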

public static List<PutMetricDataType> consolidatePutMetricDataList(
        final List<PutMetricDataType> putMetricDataList) {
    final int MAX_PUT_METRIC_DATA_ITEMS = 20;
    final LinkedHashMap<Pair<String, String>, List<MetricDatum>> metricDataMap = new LinkedHashMap<>();
    for (final PutMetricDataType putMetricData : putMetricDataList) {
        final Pair<String, String> userIdAndNamespacePair = Pair.pair(putMetricData.getUserId(),
                putMetricData.getNamespace());
        if (!metricDataMap.containsKey(userIdAndNamespacePair)) {
            metricDataMap.put(userIdAndNamespacePair, new ArrayList<MetricDatum>());
        }
        metricDataMap.get(userIdAndNamespacePair).addAll(putMetricData.getMetricData().getMember());
    }
    final ArrayList<PutMetricDataType> retVal = new ArrayList<>();
    for (final Map.Entry<Pair<String, String>, List<MetricDatum>> metricDataEntry : metricDataMap.entrySet()) {
        for (final List<MetricDatum> datums : Iterables.partition(metricDataEntry.getValue(),
                MAX_PUT_METRIC_DATA_ITEMS)) {
            final MetricData metricData = new MetricData();
            metricData.setMember(Lists.newArrayList(datums));
            final PutMetricDataType putMetricData = new PutMetricDataType();
            putMetricData.setUserId(metricDataEntry.getKey().getLeft());
            putMetricData.markPrivileged();
            putMetricData.setNamespace(metricDataEntry.getKey().getRight());
            putMetricData.setMetricData(metricData);
            retVal.add(putMetricData);
        }
    }
    return retVal;
}

From source file:com.palantir.atlasdb.keyvalue.cassandra.CQLKeyValueService.java
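
Deletes Cassandra cells in batches of the configured fetchBatchCount, executing the bound delete statements asynchronously and warning when the caller's request had to be re-chunked.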

@Override
public void delete(final String tableName, final Multimap<Cell, Long> keys) {
    int cellCount = 0;
    final String deleteQuery = "DELETE FROM " + getFullTableName(tableName) + " WHERE "
            + CassandraConstants.ROW_NAME + " = ? AND " + CassandraConstants.COL_NAME_COL + " = ? AND "
            + CassandraConstants.TS_COL + " = ?";
    final CassandraKeyValueServiceConfig config = configManager.getConfig();
    int fetchBatchCount = config.fetchBatchCount();
    for (final List<Cell> batch : Iterables.partition(keys.keySet(), fetchBatchCount)) {
        cellCount += batch.size();
        PreparedStatement deleteStatement = getPreparedStatement(tableName, deleteQuery,
                longRunningQuerySession).setConsistencyLevel(deleteConsistency);
        List<ResultSetFuture> resultSetFutures = Lists.newArrayList();
        for (Cell key : batch) {
            for (long ts : Ordering.natural().immutableSortedCopy(keys.get(key))) {
                BoundStatement boundStatement = deleteStatement.bind(ByteBuffer.wrap(key.getRowName()),
                        ByteBuffer.wrap(key.getColumnName()), ~ts);
                resultSetFutures.add(longRunningQuerySession.executeAsync(boundStatement));
            }
        }
        for (ResultSetFuture resultSetFuture : resultSetFutures) {
            ResultSet resultSet;
            try {
                resultSet = resultSetFuture.getUninterruptibly();
                resultSet.all();
            } catch (Throwable t) {
                throw Throwables.throwUncheckedException(t);
            }
            CQLKeyValueServices.logTracedQuery(deleteQuery, resultSet, session, cqlStatementCache.NORMAL_QUERY);
        }
    }
    if (cellCount > fetchBatchCount) {
        log.warn("Rebatched in delete a call to " + tableName + " that attempted to delete " + cellCount
                + " cells; this may indicate overly-large batching on a higher level.\n"
                + CassandraKeyValueServices.getFilteredStackTrace("com.palantir"));
    }
}

From source file:com.flexive.core.storage.genericSQL.GenericTreeStorageSpreaded.java
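
Moves a tree node under a new parent; partition splits the moved node's child IDs into SQL IN clauses of at most SQL_IN_PARTSIZE entries when marking them dirty.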

/**
 * {@inheritDoc}
 */
@Override
public void move(Connection con, SequencerEngine seq, FxTreeMode mode, long nodeId, long newParentId,
        int newPosition) throws FxApplicationException {

    // Check both nodes (this throws an Exception if they do not exist)
    FxTreeNodeInfo node = getTreeNodeInfo(con, mode, nodeId);
    FxTreeNodeInfoSpreaded destinationNode = (FxTreeNodeInfoSpreaded) getTreeNodeInfo(con, mode, newParentId);
    final FxTreeNodeInfo parent = getTreeNodeInfo(con, mode, newParentId);

    acquireLocksForUpdate(con, mode, Arrays.asList(nodeId, newParentId, node.getParentId()));

    final long currentPos = node.getPosition();

    // Sanity checks for the position
    if (newPosition < 0) {
        newPosition = 0;
    } else if (newPosition > parent.getDirectChildCount()) {
        newPosition = parent.getDirectChildCount() == 0 ? 1 : parent.getDirectChildCount();
    }

    final boolean getsNewParent = node.getParentId() != newParentId;

    // Take ourselves into account if the node stays at the same level
    //System.out.println("newPos:"+newPosition);
    if (!getsNewParent) {
        if (node.getPosition() == newPosition) {
            // Nothing to do at all
            return;
        } else if (newPosition < currentPos) {
            //newPosition = newPosition - 1;
        } else {
            newPosition = newPosition + 1;
        }
    }
    if (newPosition < 0)
        newPosition = 0;
    //System.out.println("newPosX:"+newPosition);

    final long oldParent = node.getParentId();

    // Node may not be moved inside itself!
    if (nodeId == newParentId || node.isParentOf(destinationNode)) {
        throw new FxTreeException("ex.tree.move.recursion", nodeId);
    }

    // Make space for the new nodes
    BigInteger spacing = makeSpace(con, seq, mode, newParentId, newPosition, node.getTotalChildCount() + 1);

    // Reload the node to obtain the new boundary and spacing information
    destinationNode = (FxTreeNodeInfoSpreaded) getTreeNodeInfo(con, mode, newParentId);
    BigInteger boundaries[] = getBoundaries(con, destinationNode, newPosition);

    // Move the nodes
    int depthDelta = (destinationNode.getDepth() + 1) - node.getDepth();
    reorganizeSpace(con, seq, mode, mode, node.getId(), true, spacing, boundaries[0], null, -1, null, null,
            depthDelta, null, false, false, true);

    Statement stmt = null;
    final String TRUE = StorageManager.getBooleanTrueExpression();
    try {
        // Update the parent of the node
        stmt = con.createStatement();
        stmt.addBatch("UPDATE " + getTable(mode) + " SET PARENT=" + newParentId + " WHERE ID=" + nodeId);
        if (mode != FxTreeMode.Live)
            stmt.addBatch("UPDATE " + getTable(mode) + " SET DIRTY=" + TRUE + " WHERE ID=" + nodeId);
        stmt.executeBatch();
        stmt.close();

        // Update the childcount of the new and old parent if needed + set dirty flag
        if (getsNewParent) {
            node = getTreeNodeInfo(con, mode, nodeId);
            stmt = con.createStatement();
            stmt.addBatch("UPDATE " + getTable(mode) + " SET CHILDCOUNT=CHILDCOUNT+1 WHERE ID=" + newParentId);
            stmt.addBatch("UPDATE " + getTable(mode) + " SET CHILDCOUNT=CHILDCOUNT-1 WHERE ID=" + oldParent);
            if (mode != FxTreeMode.Live) {
                final List<Long> newChildren = selectAllChildNodeIds(con, mode, node.getLeft(), node.getRight(),
                        false);
                acquireLocksForUpdate(con, mode, Iterables.concat(newChildren, Arrays.asList(nodeId)));

                for (List<Long> part : Iterables.partition(newChildren, SQL_IN_PARTSIZE)) {
                    stmt.addBatch("UPDATE " + getTable(mode) + " SET DIRTY=" + TRUE + " WHERE ID IN ("
                            + StringUtils.join(part, ',') + ")");
                }

                stmt.addBatch("UPDATE " + getTable(mode) + " SET DIRTY=" + TRUE + " WHERE ID IN(" + oldParent
                        + "," + newParentId + ")");
            }
            stmt.executeBatch();
            stmt.close();
        }

    } catch (SQLException e) {
        throw new FxTreeException(e, "ex.tree.move.parentUpdate.failed", node.getId(), e.getMessage());
    } finally {
        try {
            if (stmt != null)
                stmt.close();
        } catch (Exception exc) {
            //ignore
        }
    }
}

From source file:com.facebook.presto.hive.HiveMetadata.java
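
Rolls back a failed Hive insert by collecting the table directory and any partition locations outside it for cleanup, fetching partition metadata from the metastore ten names at a time.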

@Override
public void rollbackInsert(ConnectorSession session, ConnectorInsertTableHandle insertHandle) {
    HiveInsertTableHandle handle = checkType(insertHandle, HiveInsertTableHandle.class, "invalid insertHandle");

    // if there is a temp directory, we only need to cleanup temp files in this directory
    Optional<Path> writePath = locationService.writePathRoot(handle.getLocationHandle());
    if (writePath.isPresent()) {
        cleanupTempDirectory(writePath.get().toString(), handle.getFilePrefix(), "insert");
        // Note: in this case there is no need to cleanup the target directory as it will only
        // be written to during the commit call and the commit call cleans up after failures.
        return;
    }

    // Otherwise, insert was directly into the target table and partitions, and all must be checked for temp files
    Optional<Table> table = metastore.getTable(handle.getSchemaName(), handle.getTableName());
    if (!table.isPresent()) {
        log.error(
                "Error rolling back insert into table %s.%s. Table was dropped during insert, and data directory may contain temporary data",
                handle.getSchemaName(), handle.getTableName());
        return;
    }

    Set<String> locationsToClean = new HashSet<>();

    // check the base directory of the table (this is where new partitions are created)
    String tableDirectory = locationService.targetPathRoot(handle.getLocationHandle()).toString();
    locationsToClean.add(tableDirectory);

    // check every existing partition that is outside of the base directory
    if (!table.get().getPartitionKeys().isEmpty()) {
        List<String> partitionNames = metastore.getPartitionNames(handle.getSchemaName(), handle.getTableName())
                .orElse(ImmutableList.of());
        for (List<String> partitionNameBatch : Iterables.partition(partitionNames, 10)) {
            metastore.getPartitionsByNames(handle.getSchemaName(), handle.getTableName(), partitionNameBatch)
                    .orElse(ImmutableMap.of()).values().stream()
                    .map(partition -> partition.getSd().getLocation())
                    .filter(location -> !location.startsWith(tableDirectory)).forEach(locationsToClean::add);
        }
    }

    // delete any file that starts with the unique prefix of this query
    List<String> notDeletedFiles = new ArrayList<>();
    for (String location : locationsToClean) {
        notDeletedFiles.addAll(recursiveDeleteFilesStartingWith(location, handle.getFilePrefix()));
    }
    if (!notDeletedFiles.isEmpty()) {
        log.error("Cannot delete insert data files %s", notDeletedFiles);
    }

    // Note: we can not delete any of these locations since we do not know who created them
}

From source file:ome.services.graphs.GraphTraversal.java
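
Queries link relationships in batches of BATCH_SIZE IDs so the HQL parameter list stays bounded, then caches the linker/linked pairs for graph planning.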

/**
 * Load a specific link property's object relationships into the various cache fields of {@link Planning}.
 * @param linkProperty the link property being processed
 * @param query the HQL to query the property's object relationships
 * @param ids the IDs of the related objects
 * @return which linker objects are related to which linked objects by the given property
 * @throws GraphException if the objects could not be converted to unloaded instances
 */
private List<Entry<CI, CI>> getLinksToCache(CP linkProperty, String query, Collection<Long> ids)
        throws GraphException {
    final String linkedClassName = getLinkedClass(linkProperty);
    final boolean propertyIsAccessible = model.isPropertyAccessible(linkProperty.className,
            linkProperty.propertyName);
    final SetMultimap<Long, Long> linkerToLinked = HashMultimap.create();
    for (final List<Long> idsBatch : Iterables.partition(ids, BATCH_SIZE)) {
        for (final Object[] result : (List<Object[]>) session.createQuery(query)
                .setParameterList("ids", idsBatch).list()) {
            linkerToLinked.put((Long) result[0], (Long) result[1]);
        }
    }
    final List<Entry<CI, CI>> linkerLinked = new ArrayList<Entry<CI, CI>>();
    final Map<Long, CI> linkersById = findObjectDetails(linkProperty.className, linkerToLinked.keySet());
    final Map<Long, CI> linkedsById = findObjectDetails(linkedClassName,
            new HashSet<Long>(linkerToLinked.values()));
    for (final Entry<Long, Long> linkerIdLinkedId : linkerToLinked.entries()) {
        final CI linker = linkersById.get(linkerIdLinkedId.getKey());
        final CI linked = linkedsById.get(linkerIdLinkedId.getValue());
        if (!planning.detailsNoted.containsKey(linker)) {
            log.warn("failed to query for " + linker);
        } else if (!planning.detailsNoted.containsKey(linked)) {
            log.warn("failed to query for " + linked);
        } else {
            linkerLinked.add(Maps.immutableEntry(linker, linked));
            if (propertyIsAccessible) {
                planning.befores.put(linked, linker);
                planning.afters.put(linker, linked);
            }
            if (log.isDebugEnabled()) {
                log.debug(linkProperty.toCPI(linker.id) + " links to " + linked);
            }
        }
    }
    return linkerLinked;
}

From source file:org.apache.pulsar.client.impl.ConsumerImpl.java
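
Redelivers unacknowledged messages on a Shared subscription, partitioning the message IDs into redeliver commands of at most MAX_REDELIVER_UNACKNOWLEDGED IDs each.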

@Override
public void redeliverUnacknowledgedMessages(Set<MessageIdImpl> messageIds) {
    if (conf.getSubscriptionType() != SubscriptionType.Shared) {
        // We cannot redeliver single messages if subscription type is not Shared
        redeliverUnacknowledgedMessages();
        return;
    }
    ClientCnx cnx = cnx();
    if (isConnected() && cnx.getRemoteEndpointProtocolVersion() >= ProtocolVersion.v2.getNumber()) {
        int messagesFromQueue = removeExpiredMessagesFromQueue(messageIds);
        Iterable<List<MessageIdImpl>> batches = Iterables.partition(messageIds, MAX_REDELIVER_UNACKNOWLEDGED);
        MessageIdData.Builder builder = MessageIdData.newBuilder();
        batches.forEach(ids -> {
            List<MessageIdData> messageIdDatas = ids.stream().map(messageId -> {
                // attempt to remove message from batchMessageAckTracker
                batchMessageAckTracker.remove(messageId);
                builder.setPartition(messageId.getPartitionIndex());
                builder.setLedgerId(messageId.getLedgerId());
                builder.setEntryId(messageId.getEntryId());
                return builder.build();
            }).collect(Collectors.toList());
            ByteBuf cmd = Commands.newRedeliverUnacknowledgedMessages(consumerId, messageIdDatas);
            cnx.ctx().writeAndFlush(cmd, cnx.ctx().voidPromise());
            messageIdDatas.forEach(MessageIdData::recycle);
        });
        if (messagesFromQueue > 0) {
            increaseAvailablePermits(cnx, messagesFromQueue);
        }
        builder.recycle();
        if (log.isDebugEnabled()) {
            log.debug("[{}] [{}] [{}] Redeliver unacked messages and increase {} permits", subscription, topic,
                    consumerName, messagesFromQueue);
        }
        return;
    }
    if (cnx == null || (getState() == State.Connecting)) {
        log.warn("[{}] Client Connection needs to be establised for redelivery of unacknowledged messages",
                this);
    } else {
        log.warn("[{}] Reconnecting the client to redeliver the messages.", this);
        cnx.ctx().close();
    }
}

From source file:com.google.devtools.build.lib.query2.SkyQueryEnvironment.java
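
Walks reverse dependencies of the given BUILD files and flushes the accumulated package keys to the callback in batches of BATCH_CALLBACK_SIZE.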

/**
 * Calculates the set of {@link Package} objects, represented as source file targets, that depend
 * on the given list of BUILD files and subincludes (other files are filtered out).
 */
@ThreadSafe
void getRBuildFiles(Collection<PathFragment> fileIdentifiers, Callback<Target> callback)
        throws QueryException, InterruptedException {
    Collection<SkyKey> files = getSkyKeysForFileFragments(fileIdentifiers);
    Uniquifier<SkyKey> keyUniquifier = new ThreadSafeSkyKeyUniquifier(/*concurrencyLevel=*/ 1);
    Collection<SkyKey> current = keyUniquifier.unique(graph.getSuccessfulValues(files).keySet());
    Set<SkyKey> resultKeys = CompactHashSet.create();
    while (!current.isEmpty()) {
        Collection<Iterable<SkyKey>> reverseDeps = graph.getReverseDeps(current).values();
        current = new HashSet<>();
        for (SkyKey rdep : Iterables.concat(reverseDeps)) {
            if (rdep.functionName().equals(SkyFunctions.PACKAGE)) {
                resultKeys.add(rdep);
                // Every package has a dep on the external package, so we need to include those edges too.
                if (rdep.equals(PackageValue.key(Label.EXTERNAL_PACKAGE_IDENTIFIER))) {
                    if (keyUniquifier.unique(rdep)) {
                        current.add(rdep);
                    }
                }
            } else if (!rdep.functionName().equals(SkyFunctions.PACKAGE_LOOKUP)) {
                // Packages may depend on the existence of subpackages, but these edges aren't relevant to
                // rbuildfiles.
                if (keyUniquifier.unique(rdep)) {
                    current.add(rdep);
                }
            }
        }
        if (resultKeys.size() >= BATCH_CALLBACK_SIZE) {
            for (Iterable<SkyKey> batch : Iterables.partition(resultKeys, BATCH_CALLBACK_SIZE)) {
                callback.process(getBuildFilesForPackageValues(graph.getSuccessfulValues(batch).values()));
            }
            resultKeys.clear();
        }
    }
    callback.process(getBuildFilesForPackageValues(graph.getSuccessfulValues(resultKeys).values()));
}

From source file:org.ofbiz.core.entity.GenericDAO.java
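
Rewrites an oversized SQL IN condition as an OR over IN clauses of at most maxListSize elements each.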

@VisibleForTesting
static EntityCondition transformConditionSplittingInClauseListsToChunksNoLongerThanMaxSize(
        EntityCondition whereEntityCondition, final int maxListSize) {
    return EntityConditionHelper.transformCondition(whereEntityCondition,
            new Function<EntityExpr, EntityCondition>() {
                public EntityCondition apply(final EntityExpr input) {
                    if (input.getOperator().equals(EntityOperator.IN) && input.getRhs() instanceof Collection
                            && ((Collection<?>) input.getRhs()).size() > maxListSize) {
                        //split into list of expressions
                        final ImmutableList<EntityExpr> listOfExpressions = ImmutableList.copyOf(Iterables
                                .transform(Iterables.partition(((Collection<?>) input.getRhs()), maxListSize),
                                        new Function<List<?>, EntityExpr>() {
                                            public EntityExpr apply(@Nullable final List<?> list) {
                                                return new EntityExpr((String) input.getLhs(),
                                                        input.getOperator(), list);
                                            }
                                        }));
                        return new EntityExprList(listOfExpressions, EntityOperator.OR);
                    } else {
                        return input;
                    }
                }
            });
}

From source file:com.flexive.core.storage.genericSQL.GenericTreeStorageSpreaded.java
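
Acquires update locks on tree nodes 500 IDs at a time, returning false as soon as a deadlock is detected.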

private boolean lockForUpdate(Connection con, String table, Iterable<Long> nodeIds, String field)
        throws FxDbException {
    if (nodeIds == null || !nodeIds.iterator().hasNext()) {
        return tryLock(con, table, null, "id");
    }
    for (List<Long> part : Iterables.partition(nodeIds, 500)) {
        if (!tryLock(con, table, part, "id")) {
            return false; // deadlock detected
        }
    }
    return true; // success
}