Example usage for com.google.common.collect Multimap keySet

Introduction

This page shows example usage of the com.google.common.collect Multimap keySet method, drawn from open-source projects.

Prototype

Set<K> keySet();

Document

Returns a view collection of all distinct keys contained in this multimap.
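
A minimal, self-contained sketch (the class and variable names are illustrative, not taken from the examples below) showing that keySet() yields each distinct key once and is a live view backed by the multimap:

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;

import java.util.Collection;

public class MultimapKeySetExample {
    public static void main(String[] args) {
        // Illustrative data; not from the usage listings below
        Multimap<String, Integer> scores = HashMultimap.create();
        scores.put("alice", 10);
        scores.put("alice", 20);
        scores.put("bob", 5);

        // keySet() contains "alice" only once, even though it maps to two values
        for (String name : scores.keySet()) {
            Collection<Integer> values = scores.get(name);
            System.out.println(name + " -> " + values);
        }

        // Because the set is a view, removing a key also removes all of its values
        scores.keySet().remove("bob");
        System.out.println(scores.containsKey("bob")); // false
    }
}

Most of the examples below follow the same pattern: iterate keySet() and call get(key) to process each key's collection of values.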

Usage

From source file:com.zimbra.cs.db.DbBlobConsistency.java

public static int getNumRows(DbConnection conn, Mailbox mbox, String tableName, String idColName,
        Multimap<Integer, Integer> idRevs) throws ServiceException {
    Set<Integer> mail_itemIds = new HashSet<Integer>();
    Multimap<Integer, Integer> rev_itemIds = HashMultimap.create();
    for (Integer itemId : idRevs.keySet()) {
        Collection<Integer> revs = idRevs.get(itemId);
        for (int rev : revs) {
            if (rev == 0) {
                mail_itemIds.add(itemId);
            } else {
                rev_itemIds.put(itemId, rev);
            }
        }
    }
    PreparedStatement stmt = null;
    ResultSet rs = null;
    try {
        StringBuffer sql = new StringBuffer();
        boolean revisionTable = tableName.startsWith(DbMailItem.TABLE_REVISION);
        sql.append("SELECT COUNT(*) FROM ").append(DbMailbox.qualifyTableName(mbox, tableName))
                .append(" WHERE ").append(DbMailItem.IN_THIS_MAILBOX_AND);

        if (!revisionTable || mail_itemIds.size() > 0) {
            if (mail_itemIds.size() == 0) {
                sql.append(idColName).append(" in ('')");
            } else {
                sql.append(DbUtil.whereIn(idColName, mail_itemIds.size()));
            }
        }
        if (revisionTable) {
            if (mail_itemIds.size() > 0 && rev_itemIds.size() > 0) {
                sql.append(" OR ");
            }

            if (rev_itemIds.size() > 0) {
                sql.append(DbUtil.whereIn(Db.getInstance().concat(idColName, "'-'", "version"),
                        rev_itemIds.size()));
            }
        }

        stmt = conn.prepareStatement(sql.toString());
        int pos = 1;
        pos = DbMailItem.setMailboxId(stmt, mbox, pos);

        for (int itemId : mail_itemIds) {
            stmt.setInt(pos++, itemId);
        }

        if (revisionTable) {
            for (Integer itemId : rev_itemIds.keySet()) {
                Collection<Integer> revs = rev_itemIds.get(itemId);
                for (int rev : revs) {
                    stmt.setString(pos++, itemId + "-" + rev);
                }
            }
        }
        rs = stmt.executeQuery();
        rs.next();
        return rs.getInt(1);
    } catch (SQLException e) {
        throw ServiceException.FAILURE("getting number of rows for matching id's in " + tableName, e);
    } finally {
        DbPool.closeResults(rs);
        DbPool.quietCloseStatement(stmt);
    }
}

From source file:org.apache.streams.datasift.csdl.DatasiftCsdlUtil.java

public static String csdlMultifieldMatch(Multimap<String, String> require1, Multimap<String, String> require2,
        Multimap<String, String> exclude) throws Exception {

    StringBuilder csdlBuilder = new StringBuilder();

    String require1String;
    String require2String = null;
    String excludeString = null;
    List<String> require1clauses = Lists.newArrayList();
    for (String includeField : require1.keySet()) {
        StringBuilder clauseBuilder = new StringBuilder();
        Collection<String> values = require1.get(includeField);
        String match_clause = null;
        if (values.size() > 1)
            match_clause = "contains_any";
        else if (values.size() == 1)
            match_clause = "contains";
        if (match_clause != null) {
            clauseBuilder.append(includeField + " " + match_clause + " \"");
            Joiner.on(",").skipNulls().appendTo(clauseBuilder, values);
            clauseBuilder.append("\"");
            require1clauses.add(clauseBuilder.toString());
        }
    }
    require1String = "(\n" + Joiner.on("\nOR\n").skipNulls().join(require1clauses) + "\n)\n";

    if (require2 != null && require2.keySet().size() > 0) {
        List<String> require2clauses = Lists.newArrayList();
        for (String includeField : require2.keySet()) {
            StringBuilder clauseBuilder = new StringBuilder();
            Collection<String> values = require2.get(includeField);
            String match_clause = null;
            if (values.size() > 1)
                match_clause = "contains_any";
            else if (values.size() == 1)
                match_clause = "contains";
            if (match_clause != null) {
                clauseBuilder.append(includeField + " " + match_clause + " \"");
                Joiner.on(",").skipNulls().appendTo(clauseBuilder, values);
                clauseBuilder.append("\"");
                require2clauses.add(clauseBuilder.toString());
            }
        }
        require2String = "(\n" + Joiner.on("\nOR\n").skipNulls().join(require2clauses) + "\n)\n";
    }

    if (exclude != null && exclude.keySet().size() > 0) {
        List<String> excludeclauses = Lists.newArrayList();
        for (String includeField : exclude.keySet()) {
            StringBuilder clauseBuilder = new StringBuilder();
            Collection<String> values = exclude.get(includeField);
            String match_clause = null;
            if (values.size() > 1)
                match_clause = "contains_any";
            else if (values.size() == 1)
                match_clause = "contains";
            if (match_clause != null) {
                clauseBuilder.append(includeField + " " + match_clause + " \"");
                Joiner.on(",").skipNulls().appendTo(clauseBuilder, values);
                clauseBuilder.append("\"");
                excludeclauses.add(clauseBuilder.toString());
            }
        }
        excludeString = "(\n" + Joiner.on("\nOR\n").skipNulls().join(excludeclauses) + "\n)\n";
    }

    Joiner.on("AND\n").skipNulls().appendTo(csdlBuilder, require1String, require2String);
    csdlBuilder.append("AND NOT\n" + excludeString);

    log.debug(csdlBuilder.toString());

    return csdlBuilder.toString();
}

From source file:com.opengamma.strata.loader.csv.RatesCurvesCsvLoader.java

private static Map<LocalDate, Map<CurveName, Curve>> parseCurves(Predicate<LocalDate> datePredicate,
        CharSource settingsResource, Collection<CharSource> curvesResources) {

    // load curve settings
    Map<CurveName, LoadedCurveSettings> settingsMap = parseCurveSettings(settingsResource);

    // load curves, ensuring each curve is seen only once within a date
    Map<LocalDate, Map<CurveName, Curve>> resultMap = new TreeMap<>();
    for (CharSource curvesResource : curvesResources) {
        Multimap<LocalDate, Curve> fileCurvesByDate = parseSingle(datePredicate, curvesResource, settingsMap);
        // Ensure curve names are unique, with a good error message
        for (LocalDate date : fileCurvesByDate.keySet()) {
            Collection<Curve> fileCurves = fileCurvesByDate.get(date);
            Map<CurveName, Curve> resultCurves = resultMap.computeIfAbsent(date, d -> new HashMap<>());
            for (Curve fileCurve : fileCurves) {
                if (resultCurves.put(fileCurve.getName(), fileCurve) != null) {
                    throw new IllegalArgumentException(
                            "Rates curve loader found multiple curves with the same name: "
                                    + fileCurve.getName());
                }
            }
        }
    }
    return resultMap;
}

From source file:com.zimbra.cs.db.DbBlobConsistency.java

public static void delete(DbConnection conn, Mailbox mbox, Multimap<Integer, Integer> idRevs)
        throws ServiceException {
    Set<Integer> mail_itemIds = new HashSet<Integer>();
    Multimap<Integer, Integer> rev_itemIds = HashMultimap.create();
    for (Integer itemId : idRevs.keySet()) {
        Collection<Integer> revs = idRevs.get(itemId);
        for (int rev : revs) {
            if (rev == 0) {
                mail_itemIds.add(itemId);
            } else {
                rev_itemIds.put(itemId, rev);
            }
        }
    }

    if (mail_itemIds.size() > 0) {
        PreparedStatement miDumpstmt = null;
        try {
            StringBuffer sql = new StringBuffer();
            sql.append("DELETE FROM ").append(DbMailItem.getMailItemTableName(mbox, true)).append(" WHERE ")
                    .append(DbMailItem.IN_THIS_MAILBOX_AND).append(DbUtil.whereIn("id", mail_itemIds.size()));

            miDumpstmt = conn.prepareStatement(sql.toString());
            int pos = 1;
            pos = DbMailItem.setMailboxId(miDumpstmt, mbox, pos);
            for (int itemId : mail_itemIds) {
                miDumpstmt.setInt(pos++, itemId);
            }
            miDumpstmt.execute();
        } catch (SQLException e) {
            throw ServiceException.FAILURE(
                    "deleting " + idRevs.size() + " item(s): " + DbMailItem.getIdListForLogging(idRevs.keys())
                            + " from " + DbMailItem.TABLE_MAIL_ITEM_DUMPSTER + " table",
                    e);
        } finally {
            DbPool.quietCloseStatement(miDumpstmt);
        }
    }

    if (rev_itemIds.size() > 0) {
        PreparedStatement revDumpstmt = null;
        try {
            StringBuffer sql = new StringBuffer();
            sql.append("DELETE FROM ").append(DbMailItem.getRevisionTableName(mbox, true)).append(" WHERE ")
                    .append(DbMailItem.IN_THIS_MAILBOX_AND).append(DbUtil
                            .whereIn(Db.getInstance().concat("item_id", "'-'", "version"), rev_itemIds.size()));

            revDumpstmt = conn.prepareStatement(sql.toString());
            int pos = 1;
            pos = DbMailItem.setMailboxId(revDumpstmt, mbox, pos);
            for (Integer itemId : rev_itemIds.keySet()) {
                Collection<Integer> revs = rev_itemIds.get(itemId);
                for (int rev : revs) {
                    revDumpstmt.setString(pos++, itemId + "-" + rev);
                }
            }
            revDumpstmt.execute();
        } catch (SQLException e) {
            throw ServiceException.FAILURE(
                    "deleting " + idRevs.size() + " item(s): " + DbMailItem.getIdListForLogging(idRevs.keys())
                            + " from " + DbMailItem.TABLE_REVISION_DUMPSTER + " table",
                    e);
        } finally {
            DbPool.quietCloseStatement(revDumpstmt);
        }
    }
}

From source file:com.zimbra.cs.db.DbBlobConsistency.java

public static void export(DbConnection conn, Mailbox mbox, String tableName, String idColName,
        Multimap<Integer, Integer> idRevs, String path) throws ServiceException {
    Set<Integer> mail_itemIds = new HashSet<Integer>();
    Multimap<Integer, Integer> rev_itemIds = HashMultimap.create();
    for (Integer itemId : idRevs.keySet()) {
        Collection<Integer> revs = idRevs.get(itemId);
        for (int rev : revs) {
            if (rev == 0) {
                mail_itemIds.add(itemId);
            } else {
                rev_itemIds.put(itemId, rev);
            }
        }
    }
    PreparedStatement stmt = null;

    if (!(Db.getInstance() instanceof MySQL)) {
        throw ServiceException.INVALID_REQUEST("export is only supported for MySQL", null);
    }
    ZimbraLog.sqltrace.info("Exporting %d items in table %s to %s.", idRevs.size(), tableName, path);

    try {
        StringBuffer sql = new StringBuffer();
        boolean revisionTable = tableName.startsWith(DbMailItem.TABLE_REVISION);
        sql.append("SELECT * FROM ").append(DbMailbox.qualifyTableName(mbox, tableName)).append(" WHERE ")
                .append(DbMailItem.IN_THIS_MAILBOX_AND);

        if (!revisionTable || mail_itemIds.size() > 0) {
            if (mail_itemIds.size() == 0) {
                sql.append(idColName).append(" in ('')");
            } else {
                sql.append(DbUtil.whereIn(idColName, mail_itemIds.size()));
            }
        }
        if (revisionTable) {
            if (mail_itemIds.size() > 0 && rev_itemIds.size() > 0) {
                sql.append(" OR ");
            }
            if (rev_itemIds.size() > 0) {
                sql.append(DbUtil.whereIn(Db.getInstance().concat(idColName, "'-'", "version"),
                        rev_itemIds.size()));
            }
        }
        sql.append(" INTO OUTFILE ?");
        stmt = conn.prepareStatement(sql.toString());
        int pos = 1;
        pos = DbMailItem.setMailboxId(stmt, mbox, pos);
        for (int itemId : mail_itemIds) {
            stmt.setInt(pos++, itemId);
        }

        if (revisionTable) {
            for (Integer itemId : rev_itemIds.keySet()) {
                Collection<Integer> revs = rev_itemIds.get(itemId);
                for (int rev : revs) {
                    stmt.setString(pos++, itemId + "-" + rev);
                }
            }
        }
        stmt.setString(pos++, path);
        stmt.execute();
    } catch (SQLException e) {
        throw ServiceException.FAILURE("exporting table " + tableName + " to " + path, e);
    } finally {
        DbPool.quietCloseStatement(stmt);
    }
}

From source file:com.ikanow.aleph2.analytics.spark.utils.RddDependencyUtils.java

/** Builds an RDD pipeline
 * @param inputs
 * @param enrichment_pipeline_config
 * @return a validation, if successful containing (all generated rdds, output rdds only) - normally only the second tuple is needed
 */
public static Validation<String, //(error)
        Tuple2<Map<String, Either<JavaRDD<Tuple2<Long, IBatchRecord>>, JavaRDD<Tuple2<IBatchRecord, Tuple2<Long, IBatchRecord>>>>>, //(list of all RDDs)
                Map<String, JavaRDD<Tuple2<Long, IBatchRecord>>> //(just the outputs)
>> buildEnrichmentPipeline(final IAnalyticsContext context, final JavaSparkContext jsc,
        final Multimap<String, JavaPairRDD<Object, Tuple2<Long, IBatchRecord>>> inputs,
        final Collection<EnrichmentControlMetadataBean> enrichment_pipeline_config) {
    // Build the pipeline
    final Validation<String, LinkedHashMap<String, Tuple2<Set<String>, List<EnrichmentControlMetadataBean>>>> maybe_enrichment_pipeline = DependencyUtils
            .buildPipelineOfContainers(inputs.keySet(), enrichment_pipeline_config);

    return maybe_enrichment_pipeline.bind(enrichment_pipeline -> {

        // (2 types of RDD - before and after...)
        final HashMap<String, Either<JavaRDD<Tuple2<Long, IBatchRecord>>, JavaRDD<Tuple2<IBatchRecord, Tuple2<Long, IBatchRecord>>>>> mutable_rdds = new HashMap<>();

        // Insert all the inputs:
        inputs.asMap().entrySet().stream().forEach(kv -> mutable_rdds.put(kv.getKey(), Either.left(
                kv.getValue().stream().reduce((acc1, acc2) -> acc1.union(acc2)).get().map(t2 -> t2._2()))));

        // First pass, find all the groupings:
        // (if _any_ immediately downstream element needs grouping then treat as all do and map the extra element away)
        final Map<String, Collection<String>> jobs_that_need_to_group = enrichment_pipeline.values().stream()
                .distinct().<Tuple2<String, Collection<String>>>flatMap(t2 -> {
                    return t2._2().stream().findFirst().map(e -> Optionals.ofNullable(e.grouping_fields()))
                            .<Stream<Tuple2<String, Collection<String>>>>map(
                                    groupings -> t2._1().stream().map(input -> Tuples._2T(input, groupings)))
                            .orElseGet(Stream::empty);
                }).collect(Collectors.toMap(t2 -> t2._1(), t2 -> t2._2()));

        // Second pass, do we need $inputs:
        if (enrichment_pipeline.values().stream().distinct()
                .anyMatch(t2 -> t2._1().contains(EnrichmentControlMetadataBean.PREVIOUS_STEP_ALL_INPUTS))) {
            inputs.put(EnrichmentControlMetadataBean.PREVIOUS_STEP_ALL_INPUTS, inputs.values().stream()
                    .reduce((acc1, acc2) -> acc1.union(acc2)).orElse(jsc.emptyRDD().flatMapToPair(__ -> null)));
        }

        // Third/forth pass, create another mutable state that tells us which enrichers are the furthest downstream
        final Set<String> mutable_enricher_set = new HashSet<>(enrichment_pipeline.values().stream().distinct()
                .<EnrichmentControlMetadataBean>flatMap(t2 -> StreamUtils.stream(t2._2().stream().findFirst())) // (discard inputs)
                .map(e -> e.name()).collect(Collectors.toSet()));
        enrichment_pipeline.values().stream().distinct().forEach(t2 -> {
            final EnrichmentControlMetadataBean control = t2._2().stream().findFirst().get();
            mutable_enricher_set.removeAll(control.dependencies());
        });

        // Fifth (!) pass actually does all the work:

        enrichment_pipeline.values().stream().distinct().filter(t2 -> t2._2().stream().findFirst().isPresent()) // (discard inputs)
                .forEach(t2 -> {
                    final EnrichmentControlMetadataBean control = t2._2().stream().findFirst().get();
                    final boolean upstream_is_grouped = !Optionals.ofNullable(control.grouping_fields())
                            .isEmpty();
                    final Collection<String> downstream_grouping = jobs_that_need_to_group.get(control.name());
                    final boolean downstream_is_grouped = null != downstream_grouping;

                    final boolean to_emit = mutable_enricher_set.contains(control.name());

                    // Get all the inputs:
                    // 4 cases depending on whether upstream/downstream are grouped

                    if (upstream_is_grouped) {
                        // (ignore any inputs that haven't been grouped)
                        final JavaRDD<Tuple2<IBatchRecord, Tuple2<Long, IBatchRecord>>> rdd_inputs = t2._1()
                                .stream().map(dep -> mutable_rdds.get(dep))
                                .filter(rdd_choice -> rdd_choice.isRight())
                                .map(rdd_choice -> rdd_choice.right().value())
                                .reduce((acc1, acc2) -> acc1.union(acc2)).orElseGet(() -> jsc.emptyRDD());

                        if (!downstream_is_grouped) {
                            mutable_rdds.put(control.name(),
                                    Either.left(EnrichmentPipelineService.javaGroupOf(rdd_inputs).mapPartitions(
                                            EnrichmentPipelineService.create(context, to_emit, t2._2())
                                                    .javaInMapPartitionsPostGroup())));
                        } else {
                            mutable_rdds.put(control.name(), Either.right(EnrichmentPipelineService
                                    .javaGroupOf(rdd_inputs)
                                    .mapPartitions(EnrichmentPipelineService.create(context, to_emit, t2._2())
                                            .javaInMapPartitionsPrePostGroup(
                                                    new ArrayList<>(downstream_grouping)))));
                        }
                    } else {
                        // (convert any grouped inputs to ungrouped)
                        final JavaRDD<Tuple2<Long, IBatchRecord>> rdd_inputs = t2._1().stream()
                                .map(dep -> mutable_rdds.get(dep))
                                .map(rdd_choice -> rdd_choice.<JavaRDD<Tuple2<Long, IBatchRecord>>>either(
                                        ungrouped -> ungrouped, grouped -> grouped.map(tt2 -> tt2._2())))
                                .reduce((acc1, acc2) -> acc1.union(acc2)).orElseGet(() -> jsc.emptyRDD());

                        if (!downstream_is_grouped) {
                            mutable_rdds.put(control.name(),
                                    Either.left(rdd_inputs.mapPartitions(EnrichmentPipelineService
                                            .create(context, to_emit, t2._2()).javaInMapPartitions())));
                        } else {
                            mutable_rdds.put(control.name(),
                                    Either.right(rdd_inputs.mapPartitions(EnrichmentPipelineService
                                            .create(context, to_emit, t2._2()).javaInMapPartitionsPreGroup(
                                                    new ArrayList<>(downstream_grouping)))));
                        }
                    }
                });

        return Validation.success(Tuples._2T(mutable_rdds,
                mutable_enricher_set.stream().map(e_name -> Tuples._2T(e_name, mutable_rdds.get(e_name)))
                        .filter(name_rdd -> null != name_rdd._2())
                        .<Tuple2<String, JavaRDD<Tuple2<Long, IBatchRecord>>>>map(
                                name__rdd_choice -> Tuples._2T(
                                        name__rdd_choice._1(),
                                        name__rdd_choice
                                                ._2().either(
                                                        ungrouped -> ungrouped,
                                                        grouped -> grouped.map(t2 -> t2._2))))
                        .collect(Collectors
                                .<Tuple2<String, JavaRDD<Tuple2<Long, IBatchRecord>>>, String, JavaRDD<Tuple2<Long, IBatchRecord>>>toMap(
                                        t2 -> t2._1(), t2 -> t2._2()))));
    });
}

From source file:com.bigdata.dastor.dht.BootStrapper.java

static Multimap<InetAddress, Range> getWorkMap(Multimap<Range, InetAddress> rangesWithSourceTarget,
        IFailureDetector failureDetector) {
    /*
     * Map whose key is the source node and the value is a map whose key is the
     * target and value is the list of ranges to be sent to it.
    */
    Multimap<InetAddress, Range> sources = ArrayListMultimap.create();

    // TODO look for contiguous ranges and map them to the same source
    for (Range range : rangesWithSourceTarget.keySet()) {
        for (InetAddress source : rangesWithSourceTarget.get(range)) {
            if (failureDetector.isAlive(source)) {
                sources.put(source, range);
                break;
            }
        }
    }
    return sources;
}

From source file:com.android.tools.idea.gradle.project.ProjectDiagnostics.java

public static void findAndReportStructureIssues(@NotNull Project project) {
    Multimap<String, Module> modulesByPath = ArrayListMultimap.create();

    ModuleManager moduleManager = ModuleManager.getInstance(project);
    for (Module module : moduleManager.getModules()) {
        File moduleFilePath = new File(toSystemDependentName(module.getModuleFilePath()));
        File moduleDirPath = moduleFilePath.getParentFile();
        if (moduleDirPath != null) {
            modulesByPath.put(moduleDirPath.getPath(), module);
        }
    }

    Set<String> modulePaths = modulesByPath.keySet();
    for (String modulePath : modulePaths) {
        Collection<Module> modules = modulesByPath.get(modulePath);
        int moduleCount = modules.size();
        if (moduleCount > 1) {
            ProjectSyncMessages messages = ProjectSyncMessages.getInstance(project);
            StringBuilder msg = new StringBuilder();
            msg.append("The modules ");

            int i = 0;
            Set<String> moduleNames = Sets.newHashSet();
            for (Module module : modules) {
                if (i++ != 0) {
                    msg.append(", ");
                }
                String name = module.getName();
                moduleNames.add(name);
                msg.append("'").append(name).append("'");
            }
            msg.append(" point to same directory in the file system.");

            String[] lines = { msg.toString(), "Each module has to have a unique path." };
            Message message = new Message(PROJECT_STRUCTURE_ISSUES, Message.Type.ERROR, lines);

            List<DataNode<ModuleData>> modulesToDisplayInDialog = Lists.newArrayList();
            if (ProjectSubset.isSettingEnabled()) {
                ProjectSubset subset = ProjectSubset.getInstance(project);
                Collection<DataNode<ModuleData>> cachedModules = subset.getCachedModuleData();
                if (cachedModules != null) {
                    for (DataNode<ModuleData> moduleNode : cachedModules) {
                        if (moduleNames.contains(moduleNode.getData().getExternalName())) {
                            modulesToDisplayInDialog.add(moduleNode);
                        }
                    }
                }
            }

            if (modulesToDisplayInDialog.isEmpty()) {
                messages.add(message);
            } else {
                messages.add(message, new AddOrRemoveModulesHyperlink());
            }
        }
    }
}

From source file:com.ardor3d.scenegraph.AbstractBufferData.java

private static void handleVBODelete(final Renderer deleter, final Multimap<Object, Integer> idMap) {
    Object currentGLRef = null;
    // Grab the current context, if any.
    if (deleter != null && ContextManager.getCurrentContext() != null) {
        currentGLRef = ContextManager.getCurrentContext().getGlContextRep();
    }
    // For each affected context...
    for (final Object glref : idMap.keySet()) {
        // If we have a deleter and the context is current, immediately delete
        if (deleter != null && glref.equals(currentGLRef)) {
            deleter.deleteVBOs(idMap.get(glref));
        }
        // Otherwise, add a delete request to that context's render task queue.
        else {
            GameTaskQueueManager.getManager(ContextManager.getContextForRef(glref))
                    .render(new RendererCallable<Void>() {
                        public Void call() throws Exception {
                            getRenderer().deleteVBOs(idMap.get(glref));
                            return null;
                        }
                    });
        }
    }
}

From source file:com.eucalyptus.cloudwatch.common.internal.domain.metricdata.MetricManager.java

private static void addManyMetrics(Multimap<Class, MetricEntity> metricMap) {
    for (Class c : metricMap.keySet()) {
        for (List<MetricEntity> dataBatchPartial : Iterables.partition(metricMap.get(c),
                METRIC_DATA_NUM_DB_OPERATIONS_PER_TRANSACTION)) {
            try (final TransactionResource db = Entities.transactionFor(c)) {
                int numOperations = 0;
                for (MetricEntity me : dataBatchPartial) {
                    numOperations++;
                    if (numOperations % METRIC_DATA_NUM_DB_OPERATIONS_UNTIL_SESSION_FLUSH == 0) {
                        Entities.flushSession(c);
                        Entities.clearSession(c);
                    }
                    Entities.persist(me);
                }
                db.commit();
            }
        }
    }
}