Example usage for com.google.common.collect Iterables partition

Introduction

This page lists example usages of com.google.common.collect.Iterables.partition drawn from open-source projects.

Prototype

public static <T> Iterable<List<T>> partition(final Iterable<T> iterable, final int size) 

Documentation

Divides an iterable into unmodifiable sublists of the given size (the final iterable may be smaller).
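
For orientation, here is a minimal, self-contained sketch of the method's behavior; the class name and values are illustrative, not taken from any of the projects below:

import com.google.common.collect.Iterables;

import java.util.Arrays;
import java.util.List;

public class PartitionDemo {
    public static void main(String[] args) {
        List<Integer> numbers = Arrays.asList(1, 2, 3, 4, 5);
        // Yields the unmodifiable sublists [1, 2], [3, 4], [5], in order.
        for (List<Integer> batch : Iterables.partition(numbers, 2)) {
            System.out.println(batch);
        }
    }
}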

Usage

From source file:org.apache.aurora.scheduler.reconciliation.TaskReconciler.java
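
Here partition splits the active tasks into batches of batchSize; each batch is sent to the driver for reconciliation, staggered by explicitBatchDelaySeconds.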

private void doExplicitReconcile(int batchSize) {
    Iterable<List<IScheduledTask>> activeBatches = Iterables.partition(
            Storage.Util.fetchTasks(storage, Query.unscoped().byStatus(Tasks.SLAVE_ASSIGNED_STATES)),
            batchSize);

    long delay = 0;
    for (List<IScheduledTask> batch : activeBatches) {
        executor.schedule(
                () -> driver
                        .reconcileTasks(batch.stream().map(TASK_TO_PROTO::apply).collect(Collectors.toList())),
                delay, SECONDS.getTimeUnit());
        delay += settings.explicitBatchDelaySeconds;
    }
    explicitRuns.incrementAndGet();
}

From source file:com.eucalyptus.cloudwatch.workflow.alarms.AlarmStateEvaluationDispatcher.java
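
This example groups alarms by entity class, partitions each group into lists of 100, and interleaves the batches round-robin across groups so that no single class dominates the result list.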

private List<List<AlarmEntity>> makeResultsList(List<AlarmEntity> results) {
    Multimap<Class, AlarmEntity> classMultiMap = LinkedListMultimap.create();
    for (AlarmEntity alarmEntity : results) {
        classMultiMap.put(MetricEntityFactory.getClassForEntitiesGet(alarmEntity.getMetricType(),
                MetricManager.hash(alarmEntity.getDimensionMap())), alarmEntity);
    }
    List<Iterator<List<AlarmEntity>>> iterators = Lists.newArrayList();
    for (Class clazz : classMultiMap.keySet()) {
        iterators.add(Iterables.partition(classMultiMap.get(clazz), 100).iterator());
    }
    List<List<AlarmEntity>> retVal = Lists.newArrayList();
    boolean atLeastOneMightHaveMore = true;
    while (atLeastOneMightHaveMore) {
        atLeastOneMightHaveMore = false;
        for (Iterator<List<AlarmEntity>> iterator : iterators) {
            if (iterator.hasNext()) {
                atLeastOneMightHaveMore = true;
                retVal.add(iterator.next());
            }
        }
    }
    return retVal;
}

From source file:org.apache.beam.runners.spark.translation.SparkGroupAlsoByWindowFn.java
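
Here the windowed values for a key are processed in chunks of 1000; after each chunk the input watermark and processing times are advanced so that eligible timers can fire before the next chunk is handled.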

@Override
public Iterable<WindowedValue<KV<K, Iterable<InputT>>>> call(
        WindowedValue<KV<K, Iterable<WindowedValue<InputT>>>> windowedValue) throws Exception {
    K key = windowedValue.getValue().getKey();
    Iterable<WindowedValue<InputT>> inputs = windowedValue.getValue().getValue();

    //------ based on GroupAlsoByWindowsViaOutputBufferDoFn ------//

    // Used with Batch, we know that all the data is available for this key. We can't use the
    // timer manager from the context because it doesn't exist. So we create one and emulate the
    // watermark, knowing that we have all data and it is in timestamp order.
    InMemoryTimerInternals timerInternals = new InMemoryTimerInternals();
    timerInternals.advanceProcessingTime(Instant.now());
    timerInternals.advanceSynchronizedProcessingTime(Instant.now());
    StateInternals<K> stateInternals = stateInternalsFactory.stateInternalsForKey(key);
    GABWOutputWindowedValue<K, InputT> outputter = new GABWOutputWindowedValue<>();

    ReduceFnRunner<K, InputT, Iterable<InputT>, W> reduceFnRunner = new ReduceFnRunner<>(key, windowingStrategy,
            ExecutableTriggerStateMachine
                    .create(TriggerStateMachines.stateMachineForTrigger(windowingStrategy.getTrigger())),
            stateInternals, timerInternals, outputter, new SideInputReader() {
                @Override
                public <T> T get(PCollectionView<T> view, BoundedWindow sideInputWindow) {
                    throw new UnsupportedOperationException("GroupAlsoByWindow must not have side inputs");
                }

                @Override
                public <T> boolean contains(PCollectionView<T> view) {
                    throw new UnsupportedOperationException("GroupAlsoByWindow must not have side inputs");
                }

                @Override
                public boolean isEmpty() {
                    throw new UnsupportedOperationException("GroupAlsoByWindow must not have side inputs");
                }
            }, droppedDueToClosedWindow, reduceFn, runtimeContext.getPipelineOptions());

    Iterable<List<WindowedValue<InputT>>> chunks = Iterables.partition(inputs, 1000);
    for (Iterable<WindowedValue<InputT>> chunk : chunks) {
        // Process the chunk of elements.
        reduceFnRunner.processElements(chunk);

        // Then, since elements are sorted by their timestamp, advance the input watermark
        // to the first element.
        timerInternals.advanceInputWatermark(chunk.iterator().next().getTimestamp());
        // Advance the processing times.
        timerInternals.advanceProcessingTime(Instant.now());
        timerInternals.advanceSynchronizedProcessingTime(Instant.now());

        // Fire all the eligible timers.
        fireEligibleTimers(timerInternals, reduceFnRunner);

        // Leave the output watermark undefined. Since there's no late data in batch mode
        // there's really no need to track it as we do for streaming.
    }

    // Finish any pending windows by advancing the input watermark to infinity.
    timerInternals.advanceInputWatermark(BoundedWindow.TIMESTAMP_MAX_VALUE);

    // Finally, advance the processing time to infinity to fire any timers.
    timerInternals.advanceProcessingTime(BoundedWindow.TIMESTAMP_MAX_VALUE);
    timerInternals.advanceSynchronizedProcessingTime(BoundedWindow.TIMESTAMP_MAX_VALUE);

    fireEligibleTimers(timerInternals, reduceFnRunner);

    reduceFnRunner.persist();

    return outputter.getOutputs();
}

From source file:org.apache.metron.pcap.query.PcapCli.java
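
In this example the pcap query results are partitioned by getNumRecordsPerFile(), so each output file holds at most that many records.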

public int run(String[] args) {
    if (args.length < 1) {
        printBasicHelp();
        return -1;
    }
    String jobType = args[0];
    SequenceFileIterable results = null;
    String[] commandArgs = Arrays.copyOfRange(args, 1, args.length);
    Configuration hadoopConf = new Configuration();
    String[] otherArgs = null;
    try {
        otherArgs = new GenericOptionsParser(hadoopConf, commandArgs).getRemainingArgs();
    } catch (IOException e) {
        LOGGER.error("Failed to configure hadoop with provided options: " + e.getMessage(), e);
        return -1;
    }
    CliConfig commonConfig = null;
    if ("fixed".equals(jobType)) {
        FixedCliParser fixedParser = new FixedCliParser();
        FixedCliConfig config = null;
        try {
            config = fixedParser.parse(otherArgs);
            commonConfig = config;
        } catch (ParseException | java.text.ParseException e) {
            System.err.println(e.getMessage());
            System.err.flush();
            fixedParser.printHelp();
            return -1;
        }
        if (config.showHelp()) {
            fixedParser.printHelp();
            return 0;
        }
        Pair<Long, Long> time = timeAsNanosecondsSinceEpoch(config.getStartTime(), config.getEndTime());
        long startTime = time.getLeft();
        long endTime = time.getRight();

        try {
            results = jobRunner.query(new Path(config.getBasePath()), new Path(config.getBaseOutputPath()),
                    startTime, endTime, config.getNumReducers(), config.getFixedFields(), hadoopConf,
                    FileSystem.get(hadoopConf), new FixedPcapFilter.Configurator());
        } catch (IOException | ClassNotFoundException e) {
            LOGGER.error("Failed to execute fixed filter job: " + e.getMessage(), e);
            return -1;
        } catch (InterruptedException e) {
            LOGGER.error("Failed to execute fixed filter job: " + e.getMessage(), e);
            return -1;
        }
    } else if ("query".equals(jobType)) {
        QueryCliParser queryParser = new QueryCliParser();
        QueryCliConfig config = null;
        try {
            config = queryParser.parse(otherArgs);
            commonConfig = config;
        } catch (ParseException | java.text.ParseException e) {
            System.err.println(e.getMessage());
            queryParser.printHelp();
            return -1;
        }
        if (config.showHelp()) {
            queryParser.printHelp();
            return 0;
        }
        Pair<Long, Long> time = timeAsNanosecondsSinceEpoch(config.getStartTime(), config.getEndTime());
        long startTime = time.getLeft();
        long endTime = time.getRight();

        try {
            results = jobRunner.query(new Path(config.getBasePath()), new Path(config.getBaseOutputPath()),
                    startTime, endTime, config.getNumReducers(), config.getQuery(), hadoopConf,
                    FileSystem.get(hadoopConf), new QueryPcapFilter.Configurator());
        } catch (IOException | ClassNotFoundException e) {
            LOGGER.error("Failed to execute query filter job: " + e.getMessage(), e);
            return -1;
        } catch (InterruptedException e) {
            LOGGER.error("Failed to execute query filter job: " + e.getMessage(), e);
            return -1;
        }
    } else {
        printBasicHelp();
        return -1;
    }
    try {
        Iterable<List<byte[]>> partitions = Iterables.partition(results, commonConfig.getNumRecordsPerFile());
        if (partitions.iterator().hasNext()) {
            for (List<byte[]> data : partitions) {
                String timestamp = clock.currentTimeFormatted("yyyyMMddHHmmssSSSZ");
                String outFileName = String.format("pcap-data-%s.pcap", timestamp);
                if (data.size() > 0) {
                    resultsWriter.write(data, outFileName);
                }
            }
        } else {
            System.out.println("No results returned.");
        }
    } catch (IOException e) {
        LOGGER.error("Unable to write file", e);
        return -1;
    } finally {
        try {
            results.cleanup();
        } catch (IOException e) {
            LOGGER.warn("Unable to cleanup files in HDFS", e);
        }
    }
    return 0;
}

From source file:com.eucalyptus.cluster.callback.reporting.DescribeSensorsListener.java
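
Here the running instance IDs of each cluster are partitioned into batches of SENSOR_QUERY_BATCH_SIZE, and one DescribeSensors request is dispatched per batch.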

@Override
public void fireEvent(final Hertz event) {
    final long defaultPollIntervalSeconds = TimeUnit.MINUTES.toSeconds(DEFAULT_POLL_INTERVAL_MINS);
    if (!Bootstrap.isOperational() || !BootstrapArgs.isCloudController()
            || !event.isAsserted(defaultPollIntervalSeconds)) {
        return;
    } else {
        if (DEFAULT_POLL_INTERVAL_MINS >= 1) {
            COLLECTION_INTERVAL_TIME_MS = ((int) TimeUnit.MINUTES.toMillis(DEFAULT_POLL_INTERVAL_MINS) / 2);
        } else {
            COLLECTION_INTERVAL_TIME_MS = 0;
        }

        if (COLLECTION_INTERVAL_TIME_MS == 0 || HISTORY_SIZE > 15 || HISTORY_SIZE < 1) {
            LOG.debug("The instance usage report is disabled");
        } else if (COLLECTION_INTERVAL_TIME_MS <= MAX_WRITE_INTERVAL_MS) {

            try {
                if (event.isAsserted(defaultPollIntervalSeconds)) {
                    if (Bootstrap.isFinished() && Hosts.isCoordinator()) {
                        CloudWatchHelper.DefaultInstanceInfoProvider.refresh();
                        for (final ServiceConfiguration ccConfig : Topology
                                .enabledServices(ClusterController.class)) {
                            final String ccHost = ccConfig.getHostName();
                            if (busyHosts.replace(ccHost, false, true)
                                    || busyHosts.putIfAbsent(ccHost, true) == null) {
                                Threads.lookup(Eucalyptus.class, DescribeSensorsListener.class)
                                        .submit(new Callable<Object>() {

                                            @Override
                                            public Object call() throws Exception {
                                                final ExecutorService executorService = Threads
                                                        .lookup(Eucalyptus.class, DescribeSensorsListener.class,
                                                                "response-processing")
                                                        .limitTo(4);
                                                final long startTime = System.currentTimeMillis();
                                                try {
                                                    final List<String> allInstanceIds = VmInstances
                                                            .listWithProjection(
                                                                    VmInstances.instanceIdProjection(),
                                                                    VmInstance.criterion(VmState.RUNNING),
                                                                    VmInstance.zoneCriterion(
                                                                            ccConfig.getPartition()),
                                                                    VmInstance.nonNullNodeCriterion());
                                                    final Iterable<List<String>> processInts = Iterables
                                                            .partition(allInstanceIds, SENSOR_QUERY_BATCH_SIZE);
                                                    for (final List<String> instIds : processInts) {
                                                        final ArrayList<String> instanceIds = Lists
                                                                .newArrayList(instIds);
                                                        /**
                                                         * Here we hijack the sensor callback in order to control the thread of execution used when firing.
                                                         */
                                                        final DescribeSensorCallback msgCallback = new DescribeSensorCallback(
                                                                HISTORY_SIZE, COLLECTION_INTERVAL_TIME_MS,
                                                                instanceIds) {
                                                            @Override
                                                            public void fireException(Throwable e) {
                                                            }

                                                            @Override
                                                            public void fire(DescribeSensorsResponseType msg) {
                                                            }
                                                        };
                                                        /**
                                                         * Here we actually get the future reference to the result and on a response processing thread, invoke .fire().
                                                         */
                                                        final DescribeSensorsResponseType response = AsyncRequests
                                                                .newRequest(msgCallback).dispatch(ccConfig)
                                                                .get();
                                                        executorService.submit(new Runnable() {
                                                            @Override
                                                            public void run() {
                                                                try {
                                                                    new DescribeSensorCallback(HISTORY_SIZE,
                                                                            COLLECTION_INTERVAL_TIME_MS,
                                                                            instanceIds).fire(response);
                                                                } catch (Exception e) {
                                                                    Exceptions.maybeInterrupted(e);
                                                                }
                                                            }
                                                        });
                                                    }
                                                } finally {
                                                    /**
                                                     * Only here, in the finally block, is the busy bit set back to false.
                                                     */
                                                    busyHosts.put(ccHost, false);
                                                    LOG.debug("Sensor polling for " + ccHost + " took "
                                                            + (System.currentTimeMillis() - startTime) + "ms");
                                                }
                                                return null;
                                            }
                                        });
                            } else {
                                LOG.warn("Skipping sensors polling for " + ccHost
                                        + ", previous poll not complete.");
                            }
                        }
                    }

                }
            } catch (Exception ex) {
                LOG.error("Unable to listen for describe sensors events", ex);
            }

        } else {
            LOG.error("DEFAULT_POLL_INTERVAL_MINS : " + DEFAULT_POLL_INTERVAL_MINS
                    + " must be less than 1440 minutes");
        }
    }
}

From source file:ome.services.query.HierarchyNavigator.java
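
This example partitions the uncached query IDs into batches of 256 so that each priming database query stays a manageable size.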

/**
 * Batch bulk database queries to prime the cache for {@link #doLookup(String, String, Long)}.
 * It is not necessary to call this method, but it is advised if many lookups are anticipated.
 * @param toType the type of the objects to which the query objects may be related, not <code>null</code>
 * @param fromType the query object's type, not <code>null</code>
 * @param fromIds the query objects' database IDs, none <code>null</code>
 */
protected void prepareLookups(String toType, String fromType, Collection<Long> fromIds) {
    /* note which query object IDs have not already had results cached */
    final Set<Long> fromIdsToQuery = new HashSet<Long>(fromIds);
    for (final long fromId : fromIds) {
        if (cache.getFromCache(fromType, fromId, toType) != null) {
            fromIdsToQuery.remove(fromId);
        }
    }
    if (fromIdsToQuery.isEmpty()) {
        /* ... all of them are already cached */
        return;
    }
    /* collate the results from multiple batches */
    final SetMultimap<Long, Long> fromIdsToIds = HashMultimap.create();
    for (final List<Long> fromIdsToQueryBatch : Iterables.partition(fromIdsToQuery, 256)) {
        for (final Object[] queryResult : doQuery(toType, fromType, fromIdsToQueryBatch)) {
            fromIdsToIds.put((Long) queryResult[0], (Long) queryResult[1]);
        }
    }
    /* cache the results by query object */
    for (final Entry<Long, Collection<Long>> fromIdToIds : fromIdsToIds.asMap().entrySet()) {
        cache.putIntoCache(fromType, fromIdToIds.getKey(), toType, ImmutableSet.copyOf(fromIdToIds.getValue()));
    }
    /* note empty results so that the database is not again queried */
    for (final Long fromId : Sets.difference(fromIdsToQuery, fromIdsToIds.keySet())) {
        cache.putIntoCache(fromType, fromId, toType, ImmutableSet.<Long>of());
    }
}

From source file:com.google.api.ads.adwords.awreporting.server.appengine.processors.ReportProcessorAppEngine.java
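
Here the account IDs are partitioned into lists of 500, and one App Engine task is enqueued per partition.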

/**
 * Creates the main tasks that will create the sub tasks to
 * download each report, split into batches of 500 accounts per main task.
 *
 * @param builder the session builder.
 * @param reportType the report type.
 * @param dateRangeType the date range type.
 * @param dateStart the start date.
 * @param dateEnd the ending date.
 * @param acountIdList the account IDs.
 * @param properties the properties resource.
 */
private <R extends Report> void downloadAndProcess(String mccAccountId, ReportDefinitionReportType reportType,
        ReportDefinitionDateRangeType dateRangeType, String dateStart, String dateEnd, Set<Long> acountIdList,
        Properties properties) {

    // Download Reports to local files and Generate Report objects
    LOGGER.info("\n\n ** Generating Tasks for : " + reportType.name() + " **");

    // No multiple reports of the same type
    ReportDefinition reportDefinition = getReportDefinition(reportType, dateRangeType, dateStart, dateEnd,
            reportType.value(), properties);

    @SuppressWarnings("unchecked")
    Class<R> reportBeanClass = (Class<R>) this.csvReportEntitiesMapping.getReportBeanClass(reportType);

    MccTaskCounter.increasePendingProcessTasks(Long.valueOf(mccAccountId), acountIdList.size());

    // Create a task for each 500 accounts that will create sub-tasks
    for (List<Long> partition : Iterables.partition(acountIdList, 500)) {

        // We will make the queues wait 10 seconds to make sure all Creation tasks get queued.
        // Partition needs to be serializable
        LOGGER.info(reportType.name() + " " + partition.size());
        QueueFactory.getDefaultQueue()
                .add(TaskOptions.Builder.withPayload(new ReportTaskCreator<R>(mccAccountId,
                        Lists.newArrayList(partition), reportDefinition, dateStart, dateEnd, reportRowsSetSize,
                        reportType, dateRangeType, reportBeanClass)).countdownMillis(10 * 1000l));
    }
}

From source file:io.scigraph.owlapi.OwlPostprocessor.java
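
In this example the node IDs collected for each category are partitioned into batches of 1000, each handed to a CategoryLabeler task on a fixed-size thread pool.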

public void processCategories(Map<String, String> categories) throws InterruptedException, ExecutionException {
    logger.info("Processing categories");
    final ExecutorService pool = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());
    List<Future<Map<String, Set<Long>>>> contentsFutures = new ArrayList<>();

    Transaction tx = graphDb.beginTx();
    for (Entry<String, String> category : categories.entrySet()) {
        ReadableIndex<Node> nodeIndex = graphDb.index().getNodeAutoIndexer().getAutoIndex();
        Node root = nodeIndex.get(CommonProperties.IRI, category.getKey()).getSingle();
        final Future<Map<String, Set<Long>>> contentFuture = pool
                .submit(new CategoryProcessor(graphDb, root, category.getValue()));
        contentsFutures.add(contentFuture);
    }
    Map<String, Set<Long>> toTag = new HashMap<String, Set<Long>>();
    for (Future<Map<String, Set<Long>>> contentFuture : contentsFutures) {
        final Map<String, Set<Long>> resolved = contentFuture.get();

        Iterator<String> iter = resolved.keySet().iterator();
        if (iter.hasNext()) { // is empty if the provided IRI does not exist
            String key = resolved.keySet().iterator().next();
            if (toTag.containsKey(key)) { // in case of many IRIs map to the same category
                Set<Long> acc = toTag.get(key);
                acc.addAll(resolved.get(key));
                toTag.put(key, acc);
            } else {
                toTag.putAll(resolved);
            }
        }
    }

    pool.shutdown();
    pool.awaitTermination(10, TimeUnit.DAYS);
    tx.success();
    tx.close();

    tx = graphDb.beginTx();
    for (Entry<String, Set<Long>> t : toTag.entrySet()) {
        String category = t.getKey();
        logger.info("Tagging " + t.getValue().size() + " for " + category);
        final ExecutorService taggingPool = Executors
                .newFixedThreadPool(Runtime.getRuntime().availableProcessors());
        List<Future<Boolean>> taggedFutures = new ArrayList<>();

        for (List<Long> ids : Iterables.partition(t.getValue(), 1000)) {
            final Future<Boolean> contentFuture = taggingPool
                    .submit(new CategoryLabeler(graphDb, ids, category));
            taggedFutures.add(contentFuture);
        }

        for (Future<Boolean> taggedFuture : taggedFutures) {
            taggedFuture.get();
        }
        taggingPool.shutdown();
        taggingPool.awaitTermination(10, TimeUnit.DAYS);
    }
    tx.success();
    tx.close();
}

From source file:org.sonar.core.purge.PurgeCommands.java
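
Here the characteristic IDs are partitioned into lists of at most MAX_CHARACTERISTICS_PER_QUERY because, per SONAR-3641, they cannot all be processed in one query.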

private void purgeSnapshots(final List<Long> snapshotIds) {
    // note that events are not deleted
    for (Long snapshotId : snapshotIds) {
        purgeMapper.deleteSnapshotDependencies(snapshotId);
    }
    session.commit();

    for (Long snapshotId : snapshotIds) {
        purgeMapper.deleteSnapshotDuplications(snapshotId);
    }
    session.commit();

    for (Long snapshotId : snapshotIds) {
        purgeMapper.deleteSnapshotSource(snapshotId);
    }
    session.commit();

    for (Long snapshotId : snapshotIds) {
        purgeMapper.deleteSnapshotViolations(snapshotId);
    }
    session.commit();

    for (Long snapshotId : snapshotIds) {
        purgeMapper.deleteSnapshotGraphs(snapshotId);
    }
    session.commit();

    List<Long> metricIdsWithoutHistoricalData = purgeMapper.selectMetricIdsWithoutHistoricalData();
    if (!metricIdsWithoutHistoricalData.isEmpty()) {
        for (Long snapshotId : snapshotIds) {
            purgeMapper.deleteSnapshotWastedMeasures(snapshotId, metricIdsWithoutHistoricalData);
        }
        session.commit();
    }

    List<Long> characteristicIds = purgeMapper.selectCharacteristicIdsToPurge();
    if (!characteristicIds.isEmpty()) {
        for (Long snapshotId : snapshotIds) {
            // SONAR-3641 We cannot process all characteristics at once
            for (List<Long> ids : Iterables.partition(characteristicIds, MAX_CHARACTERISTICS_PER_QUERY)) {
                purgeMapper.deleteSnapshotMeasuresOnCharacteristics(snapshotId, ids);
            }
        }
        session.commit();
    }

    for (Long snapshotId : snapshotIds) {
        purgeMapper.updatePurgeStatusToOne(snapshotId);
    }
    session.commit();
}

From source file:ome.services.util.DBBadAnnotationCheck.java
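
This example deletes the offending annotation links in batches of 256 IDs, keeping each HQL IN clause to a bounded size.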

@Override
protected void doCheck() {
    Session session = null;
    try {
        session = sessionFactory.openSession();
        int deleteCount = 0;
        String hql;
        hql = "DELETE FROM AnnotationAnnotationLink WHERE id IN "
                + "(SELECT link.id FROM AnnotationAnnotationLink link WHERE link.parent.class IN (:classes))";
        deleteCount += session.createQuery(hql).setParameterList("classes", badDiscriminators).executeUpdate();
        for (final String annotationLinkClass : annotationLinkClasses) {
            hql = "DELETE FROM " + annotationLinkClass + " WHERE id IN " + "(SELECT link.id FROM "
                    + annotationLinkClass + " link WHERE link.child.class IN (:classes))";
            deleteCount += session.createQuery(hql).setParameterList("classes", badDiscriminators)
                    .executeUpdate();

            final StringBuffer sb = new StringBuffer();
            sb.append("SELECT id, ");
            if (!noParentGroup.contains(annotationLinkClass)) {
                sb.append("parent.details.group.id, ");
            }
            sb.append("child.details.group.id, details.group.id");
            sb.append(" FROM ");
            sb.append(annotationLinkClass);
            hql = sb.toString();

            final Set<Long> toDelete = new HashSet<Long>();
            for (final Object resultRow : session.createQuery(hql).list()) {
                final Object[] resultArray = (Object[]) resultRow;
                final long[] ids = new long[resultArray.length];
                for (int index = 0; index < resultArray.length; index++) {
                    ids[index] = (Long) resultArray[index];
                }
                final long actualLinkId = ids[0];
                ids[0] = ids[1];
                if (isTooManyGroupIds(ids)) {
                    toDelete.add(actualLinkId);
                }
            }
            if (!toDelete.isEmpty()) {
                hql = "DELETE FROM " + annotationLinkClass + " WHERE id IN (:ids)";
                for (final List<Long> toDeleteBatch : Iterables.partition(toDelete, 256)) {
                    deleteCount += session.createQuery(hql).setParameterList("ids", toDeleteBatch)
                            .executeUpdate();
                }
            }
        }
        hql = "DELETE FROM Annotation annotation WHERE annotation.class IN (:classes)";
        deleteCount += session.createQuery(hql).setParameterList("classes", badDiscriminators).executeUpdate();
        if (deleteCount > 0) {
            log.warn("deleted bad annotations or links, count = " + deleteCount);
        } else if (log.isDebugEnabled()) {
            log.debug("verified annotations and links");
        }
    } catch (HibernateException e) {
        final String message = "error in checking annotations and links";
        log.error(message, e);
        throw new InternalException(message);
    } finally {
        if (session != null) {
            session.close();
        }
    }
}