Example usage for com.google.common.collect Iterables partition

Introduction

This page collects example usages of com.google.common.collect.Iterables#partition from open-source projects.

Prototype

public static <T> Iterable<List<T>> partition(final Iterable<T> iterable, final int size) 

Document

Divides an iterable into unmodifiable sublists of the given size (the final iterable may be smaller).
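
A minimal, self-contained sketch (class and variable names here are illustrative, not taken from the projects below) of the basic behavior: each inner list holds at most the given number of elements, only the final one may be shorter, and the inner lists are unmodifiable.

import com.google.common.collect.Iterables;

import java.util.Arrays;
import java.util.List;

public class PartitionExample {
    public static void main(String[] args) {
        List<Integer> numbers = Arrays.asList(1, 2, 3, 4, 5, 6, 7);

        // Split into sublists of at most 3 elements; only the last may be shorter.
        for (List<Integer> batch : Iterables.partition(numbers, 3)) {
            System.out.println(batch);
        }
        // Prints: [1, 2, 3], [4, 5, 6], [7]
    }
}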

Usage

From source file:com.facebook.presto.hive.metastore.SemiTransactionalHiveMetastore.java

@GuardedBy("this")
private void rollbackShared() {
    checkHoldsLock();

    for (DeclaredIntentionToWrite declaredIntentionToWrite : declaredIntentionsToWrite) {
        switch (declaredIntentionToWrite.getMode()) {
        case STAGE_AND_MOVE_TO_TARGET_DIRECTORY:
        case DIRECT_TO_TARGET_NEW_DIRECTORY: {
            // Note: there is no need to clean up the target directory as it will only be written
            // to during the commit call and the commit call cleans up after failures.
            Path rootPath = declaredIntentionToWrite.getRootPath();

            // In the case of DIRECT_TO_TARGET_NEW_DIRECTORY, if the directory is not guaranteed to be unique
            // for the query, it is possible that another query or compute engine may see the directory, write
            // data to it, and export it through the metastore. Therefore it may be argued that cleanup of staging
            // directories must be carried out conservatively. To be safe, we only delete files that start with
            // the unique prefix for queries in this transaction.

            recursiveDeleteFilesAndLog(declaredIntentionToWrite.getUser(), rootPath,
                    ImmutableList.of(declaredIntentionToWrite.getFilePrefix()), true,
                    format("staging/target_new directory rollback for table %s",
                            declaredIntentionToWrite.getSchemaTableName()));
            break;
        }
        case DIRECT_TO_TARGET_EXISTING_DIRECTORY: {
            Set<Path> pathsToClean = new HashSet<>();

            // Check the base directory of the declared intention
            // * existing partitions may also be in this directory
            // * this is where new partitions are created
            Path baseDirectory = declaredIntentionToWrite.getRootPath();
            pathsToClean.add(baseDirectory);

            SchemaTableName schemaTableName = declaredIntentionToWrite.getSchemaTableName();
            Optional<Table> table = delegate.getTable(schemaTableName.getSchemaName(),
                    schemaTableName.getTableName());
            if (table.isPresent()) {
                // check every existing partition that is outside of the base directory
                if (!table.get().getPartitionColumns().isEmpty()) {
                    List<String> partitionNames = delegate
                            .getPartitionNames(schemaTableName.getSchemaName(), schemaTableName.getTableName())
                            .orElse(ImmutableList.of());
                    for (List<String> partitionNameBatch : Iterables.partition(partitionNames, 10)) {
                        Collection<Optional<Partition>> partitions = delegate
                                .getPartitionsByNames(schemaTableName.getSchemaName(),
                                        schemaTableName.getTableName(), partitionNameBatch)
                                .values();
                        partitions.stream().filter(Optional::isPresent).map(Optional::get)
                                .map(partition -> partition.getStorage().getLocation()).map(Path::new)
                                .filter(path -> !isSameOrParent(baseDirectory, path))
                                .forEach(pathsToClean::add);
                    }
                }
            } else {
                logCleanupFailure(
                        "Error rolling back write to table %s.%s. Data directory may contain temporary data. Table was dropped in another transaction.",
                        schemaTableName.getSchemaName(), schemaTableName.getTableName());
            }

            // delete any file that starts with the unique prefix of this query
            for (Path path : pathsToClean) {
                // TODO: It is a known deficiency that some empty directories do not get cleaned up in S3.
                // We cannot delete any of the directories here since we do not know who created them.
                recursiveDeleteFilesAndLog(declaredIntentionToWrite.getUser(), path,
                        ImmutableList.of(declaredIntentionToWrite.getFilePrefix()), false,
                        format("target_existing directory rollback for table %s", schemaTableName));
            }

            break;
        }
        default:
            throw new UnsupportedOperationException("Unknown write mode");
        }
    }
}
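
A note on the design (an observation, not from the Presto sources): Iterables.partition batches the partition names in groups of 10 before each getPartitionsByNames call, so a rollback over a heavily partitioned table issues many small metastore requests instead of one unbounded one.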

From source file:com.flexive.core.storage.genericSQL.GenericTreeStorage.java

/**
 * {@inheritDoc}
 */
@Override
public void removeNode(Connection con, FxTreeMode mode, ContentEngine ce, long nodeId, boolean removeChildren)
        throws FxApplicationException {
    if (mode == FxTreeMode.Live)
        removeChildren = true; //always delete child nodes in live mode
    Statement stmt = null;
    if (nodeId == FxTreeNode.ROOT_NODE)
        throw new FxNoAccessException("ex.tree.delete.root");

    FxTreeNodeInfo nodeInfo = getTreeNodeInfo(con, mode, nodeId);
    ScriptingEngine scripting = EJBLookup.getScriptingEngine();
    final List<Long> scriptBeforeIds = scripting.getByScriptEvent(FxScriptEvent.BeforeTreeNodeRemoved);
    final List<Long> scriptAfterIds = scripting.getByScriptEvent(FxScriptEvent.AfterTreeNodeRemoved);
    //warning: removedNodes will only be available if script mappings for event AfterTreeNodeRemoved exist!
    List<FxTreeNode> removedNodes = scriptAfterIds.size() > 0 ? new ArrayList<FxTreeNode>(100) : null;
    final String TRUE = StorageManager.getBooleanTrueExpression();
    try {
        stmt = con.createStatement();
        if (StorageManager.isDisableIntegrityTransactional()) {
            stmt.execute(StorageManager.getReferentialIntegrityChecksStatement(false));
        }
        List<FxPK> references = new ArrayList<FxPK>(50);
        UserTicket ticket = FxContext.getUserTicket();

        // lock all affected rows
        final List<Long> removeNodeIds = selectAllChildNodeIds(con, mode, nodeInfo.getLeft(),
                nodeInfo.getRight(), true);
        acquireLocksForUpdate(con, mode,
                Iterables.concat(removeNodeIds, Arrays.asList(nodeInfo.getParentId())));
        final Map<FxPK, FxContentSecurityInfo> securityInfos = Maps
                .newHashMapWithExpectedSize(removeNodeIds.size());

        if (removeChildren) {
            //FX-102: edit permission checks on references
            ResultSet rs = stmt.executeQuery("SELECT DISTINCT REF FROM " + getTable(mode) + " WHERE " + " LFT>="
                    + nodeInfo.getLeft() + " AND RGT<=" + nodeInfo.getRight() + " ");
            while (rs != null && rs.next()) {
                try {
                    if (ce != null) {
                        final FxPK pk = new FxPK(rs.getLong(1));
                        final FxContentSecurityInfo info = ce.getContentSecurityInfo(pk);
                        FxPermissionUtils.checkPermission(ticket, ACLPermission.EDIT, info, true);
                        securityInfos.put(pk, info);
                    }
                    references.add(new FxPK(rs.getLong(1)));
                } catch (FxLoadException e) {
                    //ignore, might have been removed meanwhile
                }
            }
            // call BeforeTreeNodeRemoved scripts
            if (scriptBeforeIds.size() > 0 || scriptAfterIds.size() > 0) {
                final FxScriptBinding binding = new FxScriptBinding();
                for (long removedId : removeNodeIds) {
                    final FxTreeNode n = getNode(con, mode, removedId);
                    if (removedNodes != null)
                        removedNodes.add(n);
                    for (long scriptId : scriptBeforeIds) {
                        binding.setVariable("node", n);
                        scripting.runScript(scriptId, binding);
                    }
                }
            }

            for (List<Long> removeIds : Iterables.partition(removeNodeIds, SQL_IN_PARTSIZE)) {
                stmt.addBatch("DELETE FROM " + getTable(mode) + " WHERE id IN ("
                        + StringUtils.join(removeIds, ',') + ")");
            }
        } else {
            //FX-102: edit permission checks on references
            try {
                if (ce != null) {
                    final FxContentSecurityInfo info = ce.getContentSecurityInfo(nodeInfo.getReference());
                    FxPermissionUtils.checkPermission(FxContext.getUserTicket(), ACLPermission.EDIT, info,
                            true);
                    securityInfos.put(nodeInfo.getReference(), info);
                }
                references.add(nodeInfo.getReference());
            } catch (FxLoadException e) {
                //ignore, might have been removed meanwhile
            }
            stmt.addBatch("UPDATE " + getTable(mode) + " SET PARENT=" + nodeInfo.getParentId()
                    + " WHERE PARENT=" + nodeId);
            for (List<Long> part : Iterables.partition(removeNodeIds, SQL_IN_PARTSIZE)) {
                stmt.addBatch("UPDATE " + getTable(mode) + " SET DEPTH=DEPTH-1,DIRTY="
                        + StorageManager.getBooleanExpression(mode != FxTreeMode.Live) + " WHERE id IN ("
                        + StringUtils.join(part, ',') + ") AND DEPTH>0");
            }
            stmt.addBatch("DELETE FROM " + getTable(mode) + " WHERE ID=" + nodeId);
        }

        // Update the childcount of the parents
        if (removeChildren) {
            stmt.addBatch("UPDATE " + getTable(mode) + " SET CHILDCOUNT=CHILDCOUNT-1 WHERE ID="
                    + nodeInfo.getParentId());
        } else {
            stmt.addBatch("UPDATE " + getTable(mode) + " SET CHILDCOUNT=CHILDCOUNT+"
                    + (nodeInfo.getDirectChildCount() - 1) + " WHERE ID=" + nodeInfo.getParentId());
        }

        // Set the dirty flag for the parent if needed
        if (mode != FxTreeMode.Live) {
            stmt.addBatch(
                    "UPDATE " + getTable(mode) + " SET DIRTY=" + TRUE + " WHERE ID=" + nodeInfo.getParentId());
        }

        if (mode == FxTreeMode.Live && exists(con, FxTreeMode.Edit, nodeId)) {
            //if a node with the same id as the one removed in the live tree exists in the edit tree,
            //that node and all its children will be flagged as dirty in the edit tree
            FxTreeNodeInfo editNode = getTreeNodeInfo(con, FxTreeMode.Edit, nodeId);
            List<Long> editNodeIds = selectAllChildNodeIds(con, FxTreeMode.Edit, editNode.getLeft(),
                    editNode.getRight(), true);

            acquireLocksForUpdate(con, FxTreeMode.Edit, editNodeIds);
            for (List<Long> part : Iterables.partition(editNodeIds, SQL_IN_PARTSIZE)) {
                stmt.addBatch("UPDATE " + getTable(FxTreeMode.Edit) + " SET DIRTY=" + TRUE + " WHERE ID IN ("
                        + StringUtils.join(part, ',') + ")");
            }
        }
        stmt.executeBatch();
        if (ce != null) {
            //if the referenced content is a folder, remove it
            final Set<Long> folderTypeIds = Sets.newHashSet(FxSharedUtils.getSelectableObjectIdList(
                    CacheAdmin.getEnvironment().getType(FxType.FOLDER).getDerivedTypes(true, true)));
            for (FxPK ref : references) {
                FxContentSecurityInfo si = securityInfos.get(ref);
                if (si == null) {
                    si = ce.getContentSecurityInfo(ref);
                }
                if (folderTypeIds.contains(si.getTypeId())) {
                    final int contentCount = ce.getReferencedContentCount(si.getPk());
                    if (contentCount == 0) {
                        ce.remove(ref);
                    }
                }
            }
        }
        afterNodeRemoved(con, nodeInfo, removeChildren);
        if (removedNodes != null) {
            final FxScriptBinding binding = new FxScriptBinding();
            for (long scriptId : scriptAfterIds) {
                for (FxTreeNode n : removedNodes) {
                    binding.setVariable("node", n);
                    scripting.runScript(scriptId, binding);
                }

            }
        }
    } catch (SQLException exc) {
        String next = "";
        if (exc.getNextException() != null)
            next = " next:" + exc.getNextException().getMessage();
        throw new FxRemoveException(LOG, exc, "ex.tree.delete.failed", nodeId, exc.getMessage() + next);
    } finally {
        try {
            if (stmt != null) {
                if (StorageManager.isDisableIntegrityTransactional()) {
                    try {
                        stmt.execute(StorageManager.getReferentialIntegrityChecksStatement(true));
                    } catch (SQLException e) {
                        LOG.error(e);
                    }
                }
                stmt.close();
            }
        } catch (Exception exc) {
            //ignore
        }
    }
}
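
Here Iterables.partition caps the length of each SQL IN (...) list at SQL_IN_PARTSIZE, since databases limit how many expressions an IN list may hold (Oracle, for example, allows at most 1,000). A minimal sketch of the same idiom in isolation, with a hypothetical table name and a chunk size of 500:

import com.google.common.collect.Iterables;
import org.apache.commons.lang.StringUtils;

import java.sql.SQLException;
import java.sql.Statement;
import java.util.List;

class BatchedDelete {
    // Hypothetical helper: delete rows whose IDs are given, in IN-list chunks of 500.
    static void deleteByIds(Statement stmt, List<Long> ids) throws SQLException {
        for (List<Long> batch : Iterables.partition(ids, 500)) {
            stmt.addBatch("DELETE FROM tree_nodes WHERE id IN ("
                    + StringUtils.join(batch, ',') + ")");
        }
        stmt.executeBatch();
    }
}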

From source file:org.gvnix.web.datatables.util.QuerydslUtils.java

/**
 * Return IN expression for {@code entityPath.fieldName}.
 * <p/>
 * Expr: <br/>
 * entityPath.fieldName IN ( values ) <br/>
 * <br/>
 * If values.size() > 500 it generates: <br/>
 * Expr: <br/>
 * (entityPath.fieldName IN ( values[0-500] ) OR [entityPath.fieldName IN (
 * values[501-1000]... ])) <br/>
 * <br/>
 * 
 * @param entityPath Full path to entity and associations. For example:
 *        {@code Pet} , {@code Pet.owner}
 * @param fieldName Property name in the given entity path. For example:
 *        {@code name} in {@code Pet} entity, {@code firstName} in
 *        {@code Pet.owner} entity.
 * @param values the collection of values to match the given field name against; may be null
 * @return BooleanExpression
 */
public static <T, E> BooleanExpression createCollectionExpression(PathBuilder<T> entityPath, String fieldName,
        Collection<E> values) {
    if (StringUtils.isEmpty(fieldName) || values == null || values.isEmpty()) {
        return null;
    }

    if (values.size() > 500) {
        BooleanExpression expression = null;
        Iterable<List<E>> collectionParts = Iterables.partition(values, 500);
        for (List<E> part : collectionParts) {
            if (expression == null) {
                expression = doCreateCollectionExpression(entityPath, fieldName, part);
            } else {
                expression = expression.or(doCreateCollectionExpression(entityPath, fieldName, part));
            }
        }
        return expression;
    } else {
        return doCreateCollectionExpression(entityPath, fieldName, values);
    }
}
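
The chunk size of 500 combined with or(...) keeps each individual IN list well under the caps some databases impose (Oracle rejects IN lists of more than 1,000 expressions), at the cost of a longer WHERE clause when the value set is large.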

From source file:ome.services.graphs.GraphTraversal.java

/**
 * Prepare to remove links between the targeted model objects and the remainder of the model object graph.
 * @param isUnlinkIncludeFromExclude if {@link Action#EXCLUDE} objects must be unlinked from {@link Action#INCLUDE} objects
 * and vice versa
 * @return the actual unlinker for the targeted model objects, to be used by the caller
 * @throws GraphException if the user does not have permission to unlink the targets
 */
public PlanExecutor unlinkTargets(boolean isUnlinkIncludeFromExclude) throws GraphException {
    if (!progress.contains(Milestone.PLANNED)) {
        throw new IllegalStateException("operation not yet planned");
    }
    /* accumulate plan for unlinking included/deleted from others */
    final SetMultimap<CP, Long> toNullByCP = HashMultimap.create();
    final Map<CP, SetMultimap<Long, Entry<String, Long>>> linkerToIdToLinked = new HashMap<CP, SetMultimap<Long, Entry<String, Long>>>();
    for (final CI object : planning.included) {
        for (final String superclassName : model.getSuperclassesOfReflexive(object.className)) {
            for (final Entry<String, String> forwardLink : model.getLinkedTo(superclassName)) {
                final CP linkProperty = new CP(superclassName, forwardLink.getValue());
                final boolean isCollection = model.getPropertyKind(linkProperty.className,
                        linkProperty.propertyName) == PropertyKind.COLLECTION;
                final CPI linkSource = linkProperty.toCPI(object.id);
                for (final CI linked : planning.forwardLinksCached.get(linkSource)) {
                    final Action linkedAction = getAction(linked);
                    if (linkedAction == Action.DELETE
                            || isUnlinkIncludeFromExclude && linkedAction == Action.EXCLUDE) {
                        /* INCLUDE is linked to EXCLUDE or DELETE, so unlink */
                        if (isCollection) {
                            addRemoval(linkerToIdToLinked, linkProperty.toCPI(object.id), linked);
                        } else {
                            toNullByCP.put(linkProperty, object.id);
                        }
                    }
                }
            }
            if (isUnlinkIncludeFromExclude) {
                for (final Entry<String, String> backwardLink : model.getLinkedBy(superclassName)) {
                    final CP linkProperty = new CP(backwardLink.getKey(), backwardLink.getValue());
                    final boolean isCollection = model.getPropertyKind(linkProperty.className,
                            linkProperty.propertyName) == PropertyKind.COLLECTION;
                    final CPI linkTarget = linkProperty.toCPI(object.id);
                    for (final CI linker : planning.backwardLinksCached.get(linkTarget)) {
                        final Action linkerAction = getAction(linker);
                        if (linkerAction == Action.EXCLUDE) {
                            /* EXCLUDE is linked to INCLUDE, so unlink */
                            if (isCollection) {
                                addRemoval(linkerToIdToLinked, linkProperty.toCPI(linker.id), object);
                            } else {
                                toNullByCP.put(linkProperty, linker.id);
                            }
                        }
                    }
                }
            }
        }
    }
    for (final CI object : planning.deleted) {
        for (final String superclassName : model.getSuperclassesOfReflexive(object.className)) {
            for (final Entry<String, String> backwardLink : model.getLinkedBy(superclassName)) {
                final CP linkProperty = new CP(backwardLink.getKey(), backwardLink.getValue());
                final boolean isCollection = model.getPropertyKind(linkProperty.className,
                        linkProperty.propertyName) == PropertyKind.COLLECTION;
                final CPI linkTarget = linkProperty.toCPI(object.id);
                for (final CI linker : planning.backwardLinksCached.get(linkTarget)) {
                    final Action linkerAction = getAction(linker);
                    if (linkerAction != Action.DELETE) {
                        /* EXCLUDE, INCLUDE or OUTSIDE is linked to DELETE, so unlink */
                        if (isCollection) {
                            addRemoval(linkerToIdToLinked, linkProperty.toCPI(linker.id), object);
                        } else {
                            toNullByCP.put(linkProperty, linker.id);
                        }
                    }
                }
            }
        }
    }
    /* note unlink included/deleted by nulling properties */
    final Map<CP, Collection<Long>> eachToNullByCP = toNullByCP.asMap();
    for (final Entry<CP, Collection<Long>> nullCurr : eachToNullByCP.entrySet()) {
        final CP linker = nullCurr.getKey();
        if (unnullable.get(linker.className).contains(linker.propertyName)
                || model.getPropertyKind(linker.className, linker.propertyName) == PropertyKind.REQUIRED) {
            throw new GraphException("cannot null " + linker);
        }
        final Collection<Long> allIds = nullCurr.getValue();
        assertMayBeUpdated(linker.className, allIds);
    }
    /* note unlink included/deleted by removing from collections */
    for (final Entry<CP, SetMultimap<Long, Entry<String, Long>>> removeCurr : linkerToIdToLinked.entrySet()) {
        final CP linker = removeCurr.getKey();
        final Collection<Long> allIds = removeCurr.getValue().keySet();
        assertMayBeUpdated(linker.className, allIds);
        throw new GraphException("cannot remove elements from collection " + linker);
    }
    return new PlanExecutor() {
        @Override
        public void execute() throws GraphException {
            if (progress.contains(Milestone.UNLINKED)) {
                throw new IllegalStateException("model objects already unlinked");
            }
            /* actually do the noted unlinking */
            for (final Entry<CP, Collection<Long>> nullCurr : eachToNullByCP.entrySet()) {
                final CP linker = nullCurr.getKey();
                final Collection<Long> allIds = nullCurr.getValue();
                for (final List<Long> ids : Iterables.partition(allIds, BATCH_SIZE)) {
                    processor.nullProperties(linker.className, linker.propertyName, ids);
                }
            }
            progress.add(Milestone.UNLINKED);
        }
    };
}
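
BATCH_SIZE (a constant defined elsewhere in GraphTraversal) bounds how many IDs each processor.nullProperties(...) call receives; because Iterables.partition produces its inner lists lazily during iteration, the full ID collection is never copied into batches up front.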

From source file:io.prestosql.plugin.hive.metastore.SemiTransactionalHiveMetastore.java

@GuardedBy("this")
private void rollbackShared() {
    checkHoldsLock();

    for (DeclaredIntentionToWrite declaredIntentionToWrite : declaredIntentionsToWrite) {
        switch (declaredIntentionToWrite.getMode()) {
        case STAGE_AND_MOVE_TO_TARGET_DIRECTORY:
        case DIRECT_TO_TARGET_NEW_DIRECTORY: {
            // For STAGE_AND_MOVE_TO_TARGET_DIRECTORY, there is no need to clean up the target directory as
            // it will only be written to during the commit call and the commit call cleans up after failures.
            if ((declaredIntentionToWrite.getMode() == DIRECT_TO_TARGET_NEW_DIRECTORY)
                    && skipTargetCleanupOnRollback) {
                break;
            }

            Path rootPath = declaredIntentionToWrite.getRootPath();

            // In the case of DIRECT_TO_TARGET_NEW_DIRECTORY, if the directory is not guaranteed to be unique
            // for the query, it is possible that another query or compute engine may see the directory, write
            // data to it, and export it through the metastore. Therefore it may be argued that cleanup of staging
            // directories must be carried out conservatively. To be safe, we only delete files that start with
            // the unique prefix for queries in this transaction.

            recursiveDeleteFilesAndLog(declaredIntentionToWrite.getContext(), rootPath,
                    ImmutableList.of(declaredIntentionToWrite.getFilePrefix()), true,
                    format("staging/target_new directory rollback for table %s",
                            declaredIntentionToWrite.getSchemaTableName()));
            break;
        }
        case DIRECT_TO_TARGET_EXISTING_DIRECTORY: {
            Set<Path> pathsToClean = new HashSet<>();

            // Check the base directory of the declared intention
            // * existing partitions may also be in this directory
            // * this is where new partitions are created
            Path baseDirectory = declaredIntentionToWrite.getRootPath();
            pathsToClean.add(baseDirectory);

            SchemaTableName schemaTableName = declaredIntentionToWrite.getSchemaTableName();
            Optional<Table> table = delegate.getTable(schemaTableName.getSchemaName(),
                    schemaTableName.getTableName());
            if (table.isPresent()) {
                // check every existing partition that is outside of the base directory
                if (!table.get().getPartitionColumns().isEmpty()) {
                    List<String> partitionNames = delegate
                            .getPartitionNames(schemaTableName.getSchemaName(), schemaTableName.getTableName())
                            .orElse(ImmutableList.of());
                    for (List<String> partitionNameBatch : Iterables.partition(partitionNames, 10)) {
                        Collection<Optional<Partition>> partitions = delegate
                                .getPartitionsByNames(schemaTableName.getSchemaName(),
                                        schemaTableName.getTableName(), partitionNameBatch)
                                .values();
                        partitions.stream().filter(Optional::isPresent).map(Optional::get)
                                .map(partition -> partition.getStorage().getLocation()).map(Path::new)
                                .filter(path -> !isSameOrParent(baseDirectory, path))
                                .forEach(pathsToClean::add);
                    }
                }
            } else {
                logCleanupFailure(
                        "Error rolling back write to table %s.%s. Data directory may contain temporary data. Table was dropped in another transaction.",
                        schemaTableName.getSchemaName(), schemaTableName.getTableName());
            }

            // delete any file that starts with the unique prefix of this query
            for (Path path : pathsToClean) {
                // TODO: It is a known deficiency that some empty directories do not get cleaned up in S3.
                // We cannot delete any of the directories here since we do not know who created them.
                recursiveDeleteFilesAndLog(declaredIntentionToWrite.getContext(), path,
                        ImmutableList.of(declaredIntentionToWrite.getFilePrefix()), false,
                        format("target_existing directory rollback for table %s", schemaTableName));
            }

            break;
        }
        default:
            throw new UnsupportedOperationException("Unknown write mode");
        }
    }
}
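
This is the same rollback logic as the com.facebook.presto variant earlier on this page; the io.prestosql fork adds a skipTargetCleanupOnRollback short-circuit but uses Iterables.partition identically, batching partition names in groups of 10.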

From source file:ome.services.graphs.GraphTraversal.java

/**
 * Prepare to process the targeted model objects.
 * @return the actual processor for the targeted model objects, to be used by the caller
 * @throws GraphException if the user does not have permission to process the targets or
 * if a cycle is detected in the model object graph
 */
public PlanExecutor processTargets() throws GraphException {
    if (!progress.contains(Milestone.PLANNED)) {
        throw new IllegalStateException("operation not yet planned");
    }
    final List<Entry<Map<String, Collection<Long>>, Map<String, Collection<Long>>>> toJoinAndDelete = new ArrayList<Entry<Map<String, Collection<Long>>, Map<String, Collection<Long>>>>();
    /* process the targets forward across links */
    while (!planning.blockedBy.isEmpty()) {
        /* determine which objects can be processed in this step */
        final Collection<CI> nowUnblocked = new HashSet<CI>();
        final Iterator<Entry<CI, Set<CI>>> blocks = planning.blockedBy.entrySet().iterator();
        while (blocks.hasNext()) {
            final Entry<CI, Set<CI>> block = blocks.next();
            final CI object = block.getKey();
            if (block.getValue().isEmpty()) {
                blocks.remove();
                nowUnblocked.add(object);
            }
        }
        if (nowUnblocked.isEmpty()) {
            throw new GraphException(
                    "cycle detected among " + Joiner.on(", ").join(planning.blockedBy.keySet()));
        }
        for (final Set<CI> blockers : planning.blockedBy.values()) {
            blockers.removeAll(nowUnblocked);
        }
        final SetMultimap<String, Long> toJoin = HashMultimap.create();
        final SetMultimap<String, Long> toDelete = HashMultimap.create();
        for (final CI object : nowUnblocked) {
            if (planning.included.contains(object)) {
                toJoin.put(object.className, object.id);
            } else {
                toDelete.put(object.className, object.id);
            }
        }
        /* note this group's includes and deletes */
        final Map<String, Collection<Long>> eachToJoin = toJoin.asMap();
        for (final Entry<String, Collection<Long>> oneClassToJoin : eachToJoin.entrySet()) {
            final String className = oneClassToJoin.getKey();
            final Collection<Long> allIds = oneClassToJoin.getValue();
            assertMayBeProcessed(className, allIds);
        }
        final Map<String, Collection<Long>> eachToDelete = toDelete.asMap();
        for (final Entry<String, Collection<Long>> oneClassToDelete : eachToDelete.entrySet()) {
            final String className = oneClassToDelete.getKey();
            final Collection<Long> allIds = oneClassToDelete.getValue();
            assertMayBeDeleted(className, allIds);
        }
        toJoinAndDelete.add(Maps.immutableEntry(eachToJoin, eachToDelete));
    }
    return new PlanExecutor() {
        @Override
        public void execute() throws GraphException {
            if (!progress.contains(Milestone.UNLINKED)) {
                throw new IllegalStateException("model objects not yet unlinked");
            }
            if (progress.contains(Milestone.PROCESSED)) {
                throw new IllegalStateException("model objects already processed");
            }
            /* actually do the noted processing */
            for (final Entry<Map<String, Collection<Long>>, Map<String, Collection<Long>>> next : toJoinAndDelete) {
                final Map<String, Collection<Long>> toJoin = next.getKey();
                final Map<String, Collection<Long>> toDelete = next.getValue();
                /* perform this group's deletes */
                if (!toDelete.isEmpty()) {
                    for (final Entry<String, Collection<Long>> oneClassToDelete : toDelete.entrySet()) {
                        final String className = oneClassToDelete.getKey();
                        final Collection<Long> allIds = oneClassToDelete.getValue();
                        final Collection<Collection<Long>> idGroups;
                        if (OriginalFile.class.getName().equals(className)) {
                            idGroups = ModelObjectSequencer.sortOriginalFileIds(session, allIds);
                        } else {
                            idGroups = Collections.singleton(allIds);
                        }
                        for (final Collection<Long> idGroup : idGroups) {
                            for (final List<Long> ids : Iterables.partition(idGroup, BATCH_SIZE)) {
                                processor.deleteInstances(className, ids);
                            }
                        }
                    }
                }
                /* perform this group's includes */
                if (!toJoin.isEmpty()) {
                    for (final Entry<String, Collection<Long>> oneClassToJoin : toJoin.entrySet()) {
                        final String className = oneClassToJoin.getKey();
                        final Collection<Long> allIds = oneClassToJoin.getValue();
                        for (final List<Long> ids : Iterables.partition(allIds, BATCH_SIZE)) {
                            processor.processInstances(className, ids);
                        }
                    }
                }
            }
            progress.add(Milestone.PROCESSED);
        }
    };
}
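
Both the delete and process loops chunk their ID collections with Iterables.partition(allIds, BATCH_SIZE), so no single processor call receives an unbounded ID list.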

From source file:com.cloudera.director.aws.ec2.EC2Provider.java

/**
 * Iterates through the instances identified by the specified virtual instance IDs
 * and calls the specified handler on each instance.
 *
 * @param virtualInstanceIds the virtual instance IDs
 * @param instanceHandler    the instance handler
 */
private void forEachInstance(Collection<String> virtualInstanceIds,
        EC2Provider.InstanceHandler instanceHandler) {
    List<DescribeInstancesResult> results = Lists.newArrayList();
    for (List<String> virtualInstanceIdChunk : Iterables.partition(virtualInstanceIds,
            MAX_TAG_FILTERING_REQUESTS)) {
        DescribeInstancesResult result = client.describeInstances(new DescribeInstancesRequest()
                .withFilters(new Filter().withName("tag:" + ResourceTags.CLOUDERA_DIRECTOR_ID.getTagKey())
                        .withValues(virtualInstanceIdChunk)));
        results.add(result);
    }

    for (DescribeInstancesResult result : results) {
        forEachInstance(result, instanceHandler);
    }
}
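
Chunking the virtual instance IDs by MAX_TAG_FILTERING_REQUESTS keeps each DescribeInstances tag filter to a bounded number of values, presumably to respect EC2 API limits on filter values per request; the results are gathered first and only then passed to the instance handler.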

From source file:com.eucalyptus.loadbalancing.workflow.LoadBalancingActivitiesImpl.java

@Override
public void checkServoInstances() throws LoadBalancingActivityException {
    final int NUM_ASGS_TO_DESCRIBE = 8;
    // lookup all LoadBalancerAutoScalingGroup records
    List<LoadBalancerAutoScalingGroup> groups = Lists.newArrayList();
    Map<String, LoadBalancerAutoScalingGroup> allGroupMap = new ConcurrentHashMap<>();
    try (final TransactionResource db = Entities.transactionFor(LoadBalancerAutoScalingGroup.class)) {
        groups = Entities.query(LoadBalancerAutoScalingGroup.named(), true);
        for (LoadBalancerAutoScalingGroup g : groups) {
            allGroupMap.put(g.getName(), g);
        }
    } catch (Exception ex) {
        throw new LoadBalancingActivityException("Failed to query loadbalancer autoscaing groups", ex);
    }

    final Map<String, LoadBalancerAutoScalingGroup> groupToQuery = allGroupMap;
    // describe as group and find the unknown instance Ids
    List<AutoScalingGroupType> queriedGroups = Lists.newArrayList();

    for (final List<String> partition : Iterables.partition(groupToQuery.keySet(), NUM_ASGS_TO_DESCRIBE)) {
        try {
            DescribeAutoScalingGroupsResponseType response = EucalyptusActivityTasks.getInstance()
                    .describeAutoScalingGroupsWithVerbose(partition);
            DescribeAutoScalingGroupsResult result = response.getDescribeAutoScalingGroupsResult();
            AutoScalingGroupsType asgroups = result.getAutoScalingGroups();
            queriedGroups.addAll(asgroups.getMember());
        } catch (Exception ex) {
            throw new LoadBalancingActivityException("Failed to describe autoscaling groups", ex);
        }
    }

    /// lookup all servoInstances in the DB
    Map<String, LoadBalancerServoInstance> servoMap = new ConcurrentHashMap<>();
    try (final TransactionResource db = Entities.transactionFor(LoadBalancerServoInstance.class)) {
        final List<LoadBalancerServoInstance> result = Entities.query(LoadBalancerServoInstance.named(), true);
        for (LoadBalancerServoInstance inst : result) {
            servoMap.put(inst.getInstanceId(), inst);
        }
    } catch (Exception ex) {
        throw new LoadBalancingActivityException("Failed to lookup existing servo instances in DB", ex);
    }

    /// for all found instances that are not in the servo instance DB,
    ///     create a servo record
    final List<LoadBalancerServoInstance> newServos = Lists.newArrayList();
    final Map<String, Instance> foundInstances = new ConcurrentHashMap<>();
    for (final AutoScalingGroupType asg : queriedGroups) {
        Instances instances = asg.getInstances();
        if (instances != null && instances.getMember() != null && instances.getMember().size() > 0) {
            for (final Instance instance : instances.getMember()) {
                final String instanceId = instance.getInstanceId();
                foundInstances.put(instanceId, instance);
                if (!servoMap.containsKey(instanceId)) { /// new instance found
                    try {
                        final LoadBalancerAutoScalingGroup group = allGroupMap
                                .get(asg.getAutoScalingGroupName());
                        if (group == null)
                            throw new IllegalArgumentException("The group with name "
                                    + asg.getAutoScalingGroupName() + " not found in the database");
                        final LoadBalancerServoInstance newInstance = newInstance(instance, group);
                        newServos.add(newInstance); /// persist later
                    } catch (final Exception ex) {
                        LOG.error("Failed to construct servo instance entity", ex);
                        continue;
                    }
                }
            }
        }
    }

    // CASE 1: NEW INSTANCES WITHIN THE AS GROUP FOUND
    if (newServos.size() > 0) {
        try (final TransactionResource db = Entities.transactionFor(LoadBalancerServoInstance.class)) {
            for (LoadBalancerServoInstance instance : newServos) {
                Entities.persist(instance);
            }
            db.commit();
        } catch (Exception ex) {
            LOG.error("Failed to persist the servo instance record", ex);
        }
        if (LoadBalancingSystemVpcs.isCloudVpc().isPresent() && LoadBalancingSystemVpcs.isCloudVpc().get()) {
            try {
                newServos.stream().filter(
                        instance -> LoadBalancerServoInstance.STATE.InService.equals(instance.getState()))
                        .forEach(instance -> LoadBalancingSystemVpcs
                                .setupUserVpcInterface(instance.getInstanceId()));
            } catch (final Exception ex) {
                LOG.error("Failed to attach secondary network interface to ELB instances", ex);
            }
            try { // if servo is in VPC, update ip addresses using the secondary interface's address
                newServos.stream().filter(
                        instance -> LoadBalancerServoInstance.STATE.InService.equals(instance.getState()))
                        .forEach(instance -> {
                            updateIpAddressesInVpc(instance.getInstanceId());
                        });
            } catch (final Exception ex) {
                LOG.error("Failed to retrieve IP addresses of secondary network interface");
            }
        }
    }

    List<LoadBalancerServoInstanceCoreView> servoRecords = Lists.newArrayList();
    for (String groupName : groupToQuery.keySet()) {
        final LoadBalancerAutoScalingGroup group = groupToQuery.get(groupName);
        servoRecords.addAll(group.getServos());
    }

    //final List<LoadBalancerServoInstance> registerDnsARec = Lists.newArrayList();
    for (LoadBalancerServoInstanceCoreView instanceView : servoRecords) {
        /// CASE 2: EXISTING SERVO INSTANCES ARE NOT FOUND IN THE ASG QUERY RESPONSE
        if (!foundInstances.containsKey(instanceView.getInstanceId())
                && !instanceView.getState().equals(LoadBalancerServoInstance.STATE.Retired)) {
            LoadBalancerServoInstance instance;
            try {
                instance = LoadBalancerServoInstanceEntityTransform.INSTANCE.apply(instanceView);
            } catch (final Exception ex) {
                LOG.error("Failed to transform servo instance from the view", ex);
                continue;
            }
            try (final TransactionResource db = Entities.transactionFor(LoadBalancerServoInstance.class)) {
                final LoadBalancerServoInstance update = Entities.uniqueResult(instance);
                update.setState(LoadBalancerServoInstance.STATE.Error);
                Entities.persist(update);
                db.commit();
            } catch (Exception ex) {
                LOG.error(String.format("Failed to mark the servo instance's state to ERROR (%s)",
                        instance.getInstanceId()));
            }
        } else if (foundInstances.containsKey(instanceView.getInstanceId())) {
            /// CASE 3: INSTANCE STATE UPDATED
            Instance instanceCurrent = foundInstances.get(instanceView.getInstanceId());
            final String healthState = instanceCurrent.getHealthStatus();
            final String lifecycleState = instanceCurrent.getLifecycleState();
            LoadBalancerServoInstance.STATE curState = instanceView.getState();
            LoadBalancerServoInstance.STATE newState = curState;

            if (healthState != null && !healthState.equals("Healthy")) {
                newState = LoadBalancerServoInstance.STATE.Error;
            } else if (lifecycleState != null) {
                switch (lifecycleState) {
                case "Pending":
                    newState = LoadBalancerServoInstance.STATE.Pending;
                    break;
                case "Quarantined":
                    newState = LoadBalancerServoInstance.STATE.Error;
                    break;
                case "InService":
                    newState = LoadBalancerServoInstance.STATE.InService;
                    break;
                case "Terminating":
                case "Terminated":
                    newState = LoadBalancerServoInstance.STATE.OutOfService;
                    break;
                }
            }

            if (!curState.equals(LoadBalancerServoInstance.STATE.Retired) && !curState.equals(newState)) {
                LoadBalancerServoInstance instance;
                try {
                    instance = LoadBalancerServoInstanceEntityTransform.INSTANCE.apply(instanceView);
                } catch (final Exception ex) {
                    LOG.error("Failed to transform servo instance from the view", ex);
                    continue;
                }
                try (final TransactionResource db = Entities.transactionFor(LoadBalancerServoInstance.class)) {
                    final LoadBalancerServoInstance update = Entities.uniqueResult(instance);
                    update.setState(newState);
                    Entities.persist(update);
                    db.commit();
                } catch (Exception ex) {
                    LOG.error(String.format("Failed to commit the servo instance's state change (%s)",
                            instance.getInstanceId()));
                }
                if (LoadBalancerServoInstance.STATE.InService.equals(newState)) {
                    try {
                        if (LoadBalancingSystemVpcs.isCloudVpc().isPresent()
                                && LoadBalancingSystemVpcs.isCloudVpc().get()) {
                            LoadBalancingSystemVpcs.setupUserVpcInterface(instance.getInstanceId());
                            updateIpAddressesInVpc(instance.getInstanceId());
                        }
                    } catch (final Exception ex) {
                        LOG.error("Failed to attach secondary network interface to ELB instances", ex);
                    }
                }
            }
        }
    }
}
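
Here the autoscaling groups are described eight at a time (NUM_ASGS_TO_DESCRIBE), with Iterables.partition bounding each describeAutoScalingGroupsWithVerbose request.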

From source file:com.eucalyptus.loadbalancing.workflow.LoadBalancingActivitiesImpl.java

@Override
public void checkBackendInstances() throws LoadBalancingActivityException {
    final int NUM_INSTANCES_TO_DESCRIBE = 8;

    /// determine backend instances to query (an instance can be registered to multiple ELBs)
    final Map<String, List<LoadBalancerBackendInstance>> allInstances = Maps.newHashMap();
    try (final TransactionResource db = Entities.transactionFor(LoadBalancerBackendInstance.class)) {
        final List<LoadBalancerBackendInstance> instances = Entities.query(LoadBalancerBackendInstance.named());
        for (final LoadBalancerBackendInstance instance : instances) {
            if (!allInstances.containsKey(instance.getInstanceId())) {
                allInstances.put(instance.getInstanceId(), Lists.newArrayList());
            }
            allInstances.get(instance.getInstanceId()).add(instance);
        }
    } catch (final Exception ex) {
        throw new LoadBalancingActivityException("Failed to query backend instances", ex);
    }

    final List<RunningInstancesItemType> queryResult = Lists.newArrayList();
    for (final List<String> partition : Iterables.partition(allInstances.keySet(), NUM_INSTANCES_TO_DESCRIBE)) {
        try {
            queryResult.addAll(
                    EucalyptusActivityTasks.getInstance().describeSystemInstancesWithVerbose(partition));
        } catch (final Exception ex) {
            LOG.warn("Failed to query instances", ex);
            break;
        }
    }

    //EUCA-9919: remove registered instances when terminated
    final Set<String> terminatedInstances = Sets.newHashSet();
    final Map<String, LoadBalancerBackendInstanceStates> stateMap = new HashMap<>();
    final Map<String, RunningInstancesItemType> runningInstances = new HashMap<String, RunningInstancesItemType>();
    for (final RunningInstancesItemType instance : queryResult) {
        final String state = instance.getStateName();
        if ("pending".equals(state))
            stateMap.put(instance.getInstanceId(), LoadBalancerBackendInstanceStates.InitialRegistration);
        else if ("running".equals(state)) {
            runningInstances.put(instance.getInstanceId(), instance);
        } else if ("shutting-down".equals(state))
            stateMap.put(instance.getInstanceId(), LoadBalancerBackendInstanceStates.InstanceInvalidState);
        else if ("terminated".equals(state)) {
            stateMap.put(instance.getInstanceId(), LoadBalancerBackendInstanceStates.InstanceInvalidState);
            terminatedInstances.add(instance.getInstanceId());
        } else if ("stopping".equals(state))
            stateMap.put(instance.getInstanceId(), LoadBalancerBackendInstanceStates.InstanceStopped);
        else if ("stopped".equals(state))
            stateMap.put(instance.getInstanceId(), LoadBalancerBackendInstanceStates.InstanceStopped);
    }

    final Set<LoadBalancerBackendInstance> backendsToDelete = Sets.newHashSet();
    for (final String instanceId : allInstances.keySet()) {
        for (final LoadBalancerBackendInstance be : allInstances.get(instanceId)) {
            if (terminatedInstances.contains(instanceId)) { // case 1: instance terminated
                backendsToDelete.add(be);
                continue;
            }
            if (stateMap.containsKey(instanceId)) { // case 2: instance not in running state
                try (final TransactionResource db = Entities
                        .transactionFor(LoadBalancerBackendInstance.class)) {
                    final LoadBalancerBackendInstanceStates trueState = stateMap.get(be.getInstanceId());
                    final LoadBalancerBackendInstance update = Entities.uniqueResult(be);
                    update.setBackendState(trueState.getState());
                    update.setReasonCode(trueState.getReasonCode());
                    update.setDescription(trueState.getDescription());
                    Entities.persist(update);
                    db.commit();
                } catch (final Exception ex) {
                    ;
                }
            } else if (runningInstances.containsKey(instanceId)) { // case 3: instance running
                // case 3.a: check if instance was re-started (EUCA-11859)
                if (LoadBalancerBackendInstanceStates.InstanceStopped.isInstanceState(be)) {
                    final LoadBalancerBackendInstanceStates registration = LoadBalancerBackendInstanceStates.InitialRegistration;
                    try (final TransactionResource db = Entities
                            .transactionFor(LoadBalancerBackendInstance.class)) {
                        final LoadBalancerBackendInstance update = Entities.uniqueResult(be);
                        update.setBackendState(registration.getState());
                        update.setReasonCode(registration.getReasonCode());
                        update.setDescription(registration.getDescription());
                        Entities.persist(update);
                        db.commit();
                    } catch (final Exception ex) {
                        ;
                    }
                }

                // case 3.b: check instance's IP address change
                String instanceIpAddress = null;
                if (be.getLoadBalancer().getVpcId() == null)
                    instanceIpAddress = runningInstances.get(instanceId).getIpAddress();
                else
                    instanceIpAddress = runningInstances.get(instanceId).getPrivateIpAddress();
                if (instanceIpAddress == null) {
                    LOG.warn(String.format("Failed to determine ELB backend instance's IP address: %s",
                            instanceId));
                } else if (!instanceIpAddress.equals(be.getIpAddress())) {
                    try (final TransactionResource db = Entities
                            .transactionFor(LoadBalancerBackendInstance.class)) {
                        final LoadBalancerBackendInstance update = Entities.uniqueResult(be);
                        update.setIpAddress(instanceIpAddress);
                        update.setPartition(runningInstances.get(instanceId).getPlacement());
                        Entities.persist(update);
                        db.commit();
                    } catch (final Exception ex) {
                        ;
                    }
                }
            }
        }
    }

    for (final LoadBalancerBackendInstance be : backendsToDelete) {
        try (final TransactionResource db = Entities.transactionFor(LoadBalancerBackendInstance.class)) {
            final LoadBalancerBackendInstance entity = Entities.uniqueResult(be);
            Entities.delete(entity);
            LOG.info("Instance " + be.getInstanceId() + " is terminated and removed from ELB");
            db.commit();
        } catch (final Exception ex) {
            ;
        }
    }

    /// mark outdated instances as Error
    final int HealthUpdateTimeoutSec = 3 * MAX_HEALTHCHECK_INTERVAL_SEC; /// 6 minutes
    final Predicate<LoadBalancerBackendInstance> unreachableLoadbalancer = (instance) -> {
        if (LoadBalancerBackendInstanceStates.UnrechableLoadBalancer.isInstanceState(instance))
            return false;
        if (LoadBalancerBackendInstanceStates.InstanceStopped.isInstanceState(instance))
            return false;
        final long currentTime = System.currentTimeMillis();
        Date lastUpdated = instance.instanceStateLastUpdated();
        if (lastUpdated == null)
            lastUpdated = instance.getCreationTimestamp();
        final int diffSec = (int) ((currentTime - lastUpdated.getTime()) / 1000.0);
        return diffSec > HealthUpdateTimeoutSec;
    };

    final Set<LoadBalancerBackendInstance> outdatedInstances = allInstances.values().stream()
            .flatMap(Collection::stream).filter(v -> !backendsToDelete.contains(v)) // DB records deleted already
            .filter(v -> unreachableLoadbalancer.apply(v)).collect(Collectors.toSet());

    if (!outdatedInstances.isEmpty()) {
        final LoadBalancerBackendInstanceStates unreachable = LoadBalancerBackendInstanceStates.UnrechableLoadBalancer;
        try (TransactionResource db = Entities.transactionFor(LoadBalancerBackendInstance.class)) {
            for (final LoadBalancerBackendInstance instance : outdatedInstances) {
                final LoadBalancerBackendInstance update = Entities.uniqueResult(instance);
                update.setState(unreachable.getState());
                update.setReasonCode(unreachable.getReasonCode());
                update.setDescription(unreachable.getDescription());
                Entities.persist(update);
            }
            db.commit();
        } catch (final Exception ex) {
            ;
        }
    }
}
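
As in checkServoInstances above, Iterables.partition caps each describeSystemInstancesWithVerbose call at NUM_INSTANCES_TO_DESCRIBE (8) instance IDs; note that a failed describe breaks out of the loop, leaving later partitions unqueried.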