Example usage for javax.ejb TransactionAttributeType REQUIRES_NEW

List of usage examples for javax.ejb TransactionAttributeType REQUIRES_NEW

Introduction

On this page you can find usage examples for javax.ejb TransactionAttributeType REQUIRES_NEW.

Prototype

TransactionAttributeType REQUIRES_NEW

Document

The container must invoke an enterprise bean method whose transaction attribute is set to REQUIRES_NEW with a new transaction context.
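
A minimal sketch of the declaration pattern (the bean and entity names below are hypothetical and are not taken from the usage examples that follow): the container suspends any transaction the caller may have and runs recordEvent in a new transaction context, so the audit record is committed on return regardless of what later happens to the caller's transaction.

import javax.ejb.Stateless;
import javax.ejb.TransactionAttribute;
import javax.ejb.TransactionAttributeType;
import javax.persistence.Entity;
import javax.persistence.EntityManager;
import javax.persistence.GeneratedValue;
import javax.persistence.Id;
import javax.persistence.PersistenceContext;

@Stateless
public class AuditBean {

    @PersistenceContext
    private EntityManager em;

    @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
    public void recordEvent(String message) {
        // Runs in its own, freshly started transaction; the insert commits
        // independently of any transaction the caller is running in.
        AuditEvent event = new AuditEvent();
        event.message = message;
        em.persist(event);
    }
}

// Hypothetical JPA entity used only to keep the sketch self-contained;
// in a real project it would live in its own file.
@Entity
class AuditEvent {
    @Id
    @GeneratedValue
    Long id;
    String message;
}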

Usage

From source file:org.rhq.enterprise.server.alert.AlertDefinitionManagerBean.java

@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
public int createAlertDefinition(Subject subject, AlertDefinition alertDefinition, Integer resourceId)
        throws InvalidAlertDefinitionException {
    checkAlertDefinition(subject, alertDefinition, resourceId);

    // if this is an alert definition, set up the link to a resource
    if (resourceId != null) {
        // don't attach an alertTemplate or groupAlertDefinition to any particular resource
        // they should have already been attached to the resourceType or resourceGroup by the caller

        //Resource resource = LookupUtil.getResourceManager().getResourceById(user, resourceId);
        // use proxy trick to subvert having to load the entire resource into memory
        alertDefinition.setResource(new Resource(resourceId));
    }

    // after the resource is set up (in the case of non-templates), we can use the checkPermission on it
    if (checkPermission(subject, alertDefinition) == false) {
        if (alertDefinition.getResourceType() != null) {
            throw new PermissionException("User [" + subject.getName()
                    + "] does not have permission to create alert templates for type ["
                    + alertDefinition.getResourceType() + "]");
        } else if (alertDefinition.getResourceGroup() != null) {
            throw new PermissionException("User [" + subject.getName()
                    + "] does not have permission to create alert definitions for group ["
                    + alertDefinition.getResourceGroup() + "]");
        } else {
            throw new PermissionException("User [" + subject.getName()
                    + "] does not have permission to create alert definitions for resource ["
                    + alertDefinition.getResource() + "]");
        }
    }

    /*
     * performance optimization for the common case of single-condition alerts; it's easier for the
     * out-of-band process to check whether or not ANY conditions are true rather than ALL of them
     */
    if (alertDefinition.getConditions().size() == 1) {
        alertDefinition.setConditionExpression(BooleanExpression.ANY);
    }

    fixRecoveryId(alertDefinition);

    entityManager.persist(alertDefinition);

    boolean addToCache = false;
    // don't notify on an alert template, only for those that get attached to a resource
    // Only add to the cache if the alert definition was created as active
    if ((resourceId != null) && alertDefinition.getEnabled()) {
        // if this is a recovery alert
        if (alertDefinition.getRecoveryId() != 0) {
            // only add to the cache if the to-be-recovered definition is disabled, and thus needs recovering
            // use entityManager direct to bypass security checks, we already know this user is authorized
            AlertDefinition toBeRecoveredDefinition = entityManager.find(AlertDefinition.class,
                    alertDefinition.getRecoveryId());
            if (toBeRecoveredDefinition.getEnabled() == false) {
                addToCache = true;
            }
        } else {
            addToCache = true;
        }
    }

    if (addToCache) {
        notifyAlertConditionCacheManager(subject, "createAlertDefinition", alertDefinition,
                AlertDefinitionEvent.CREATED);
    }

    return alertDefinition.getId();
}
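
Note that the REQUIRES_NEW attribute on createAlertDefinition only takes effect when the method is invoked through the container, for example via an injected EJB reference; a direct this-call from another method of the same bean would keep running in the caller's transaction. A sketch of such a caller (the caller bean and the local interface name are assumptions for illustration, not part of the RHQ sources):

import javax.ejb.EJB;
import javax.ejb.Stateless;

@Stateless
public class AlertProvisioningBean {

    // Injection of the manager's local business interface is assumed here;
    // the actual interface name in RHQ may differ.
    @EJB
    private AlertDefinitionManagerLocal alertDefinitionManager;

    public int provision(Subject subject, AlertDefinition definition, Integer resourceId)
            throws InvalidAlertDefinitionException {
        // Invoked through the container proxy, so the container suspends the
        // current transaction (if any) and persists the definition in a new one.
        return alertDefinitionManager.createAlertDefinition(subject, definition, resourceId);
    }
}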

From source file:org.rhq.enterprise.server.measurement.MeasurementOOBManagerBean.java

/**
 * Remove OOBs for schedules that had their baselines calculated after
 * a certain cutoff point. This is used to get rid of outdated OOB data for
 * baselines that got recalculated, as the new baselines will be 'big' enough for
 * what have been OOBs before and we don't have any baseline history.
 * @param subject The caller
 * @param cutoffTime The reference time to determine new baselines
 */
@TransactionAttribute(value = TransactionAttributeType.REQUIRES_NEW)
public void removeOutdatedOOBs(Subject subject, long cutoffTime) {

    Query q = entityManager.createNamedQuery(MeasurementOOB.DELETE_OUTDATED);
    q.setParameter("cutOff", cutoffTime);
    int count = q.executeUpdate();
    log.info("Removed [" + count + "] outdated OOBs");
}

From source file:org.rhq.enterprise.server.measurement.MeasurementCompressionManagerBean.java

@TransactionTimeout(60 * 60)
@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
public int compressDataInterval(String fromTable, String toTable, long begin, long end) throws SQLException {
    Connection conn = null;
    PreparedStatement insStmt = null;

    log.info("Begin compressing data from table [" + fromTable + "] to table [" + toTable + "] between ["
            + TimeUtil.toString(begin) + "] and [" + TimeUtil.toString(end) + "]");

    int rows = 0;
    StopWatch watch = new StopWatch();
    try {
        conn = ((DataSource) ctx.lookup(DATASOURCE_NAME)).getConnection();

        // One special case. If we are compressing from an
        // already compressed table, we'll take the MIN and
        // MAX from the already calculated min and max columns.
        String minMax;
        if (MeasurementDataManagerUtility.isRawTable(fromTable)) {
            minMax = "AVG(value), MIN(value), MAX(value) ";
        } else {
            minMax = "AVG(value), MIN(minvalue), MAX(maxvalue) ";
        }

        // TODO GH: Why does this do each schedule separately?
        insStmt = conn.prepareStatement("INSERT INTO " + toTable + " (SELECT ?, ft.schedule_id, " + minMax
                + "  FROM " + fromTable + " ft " + "  WHERE ft.time_stamp >= ? AND ft.time_stamp < ? "
                + "  GROUP BY ft.schedule_id)");

        // Compress
        insStmt.setLong(1, begin);
        insStmt.setLong(2, begin);
        insStmt.setLong(3, end);

        watch.reset();
        rows = insStmt.executeUpdate();

        MeasurementMonitor.getMBean().incrementMeasurementCompressionTime(watch.getElapsed());
    } finally {
        JDBCUtil.safeClose(conn, insStmt, null);
    }

    log.info("Finished compressing data from table [" + fromTable + "] to table [" + toTable + "] between ["
            + TimeUtil.toString(begin) + "] and [" + TimeUtil.toString(end) + "], [" + rows
            + "] compressed rows in [" + (watch.getElapsed() / SECOND) + "] seconds");

    return rows;
}

From source file:edu.harvard.iq.dataverse.api.imports.ImportServiceBean.java

@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
public Dataset doImportHarvestedDataset(DataverseRequest dataverseRequest, HarvestingClient harvestingClient,
        String harvestIdentifier, String metadataFormat, File metadataFile, PrintWriter cleanupLog)
        throws ImportException, IOException {
    if (harvestingClient == null || harvestingClient.getDataverse() == null) {
        throw new ImportException(
                "importHarvestedDataset called wiht a null harvestingClient, or an invalid harvestingClient.");
    }/*from  w ww.  ja v a 2 s  .co m*/
    Dataverse owner = harvestingClient.getDataverse();
    Dataset importedDataset = null;

    DatasetDTO dsDTO = null;
    String json = null;

    // TODO: 
    // At the moment (4.5; the first official "export/harvest release"), there
    // are 3 supported metadata formats: DDI, DC and native Dataverse metadata 
    // encoded in JSON. The 2 XML formats are handled by custom implementations;
    // each of the 2 implementations uses its own parsing approach. (see the 
    // ImportDDIServiceBean and ImportGenericServiceBean for details).
    // TODO: Need to create a system of standardized import plugins - similar to Stephen
    // Kraffmiller's export modules; replace the logic below with clean
    // programmatic lookup of the import plugin needed. 

    if ("ddi".equalsIgnoreCase(metadataFormat) || "oai_ddi".equals(metadataFormat)
            || metadataFormat.toLowerCase().matches("^oai_ddi.*")) {
        try {
            String xmlToParse = new String(Files.readAllBytes(metadataFile.toPath()));
            // TODO: 
            // import type should be configurable - it should be possible to 
            // select whether you want to harvest with or without files, 
            // ImportType.HARVEST vs. ImportType.HARVEST_WITH_FILES
            logger.fine("importing DDI " + metadataFile.getAbsolutePath());
            dsDTO = importDDIService.doImport(ImportType.HARVEST_WITH_FILES, xmlToParse);
        } catch (IOException | XMLStreamException | ImportException e) {
            throw new ImportException(
                    "Failed to process DDI XML record: " + e.getClass() + " (" + e.getMessage() + ")");
        }
    } else if ("dc".equalsIgnoreCase(metadataFormat) || "oai_dc".equals(metadataFormat)) {
        logger.fine("importing DC " + metadataFile.getAbsolutePath());
        try {
            String xmlToParse = new String(Files.readAllBytes(metadataFile.toPath()));
            dsDTO = importGenericService.processOAIDCxml(xmlToParse);
        } catch (IOException | XMLStreamException e) {
            throw new ImportException(
                    "Failed to process Dublin Core XML record: " + e.getClass() + " (" + e.getMessage() + ")");
        }
    } else if ("dataverse_json".equals(metadataFormat)) {
        // This is Dataverse metadata already formatted in JSON. 
        // Simply read it into a string, and pass to the final import further down:
        logger.fine(
                "Attempting to import custom dataverse metadata from file " + metadataFile.getAbsolutePath());
        json = new String(Files.readAllBytes(metadataFile.toPath()));
    } else {
        throw new ImportException("Unsupported import metadata format: " + metadataFormat);
    }

    if (json == null) {
        if (dsDTO != null) {
            // convert DTO to Json, 
            Gson gson = new GsonBuilder().setPrettyPrinting().create();
            json = gson.toJson(dsDTO);
            logger.fine("JSON produced for the metadata harvested: " + json);
        } else {
            throw new ImportException(
                    "Failed to transform XML metadata format " + metadataFormat + " into a DatasetDTO");
        }
    }

    JsonReader jsonReader = Json.createReader(new StringReader(json));
    JsonObject obj = jsonReader.readObject();
    //and call parse Json to read it into a dataset   
    try {
        JsonParser parser = new JsonParser(datasetfieldService, metadataBlockService, settingsService);
        parser.setLenient(true);
        Dataset ds = parser.parseDataset(obj);

        // For ImportType.NEW, if the metadata contains a global identifier, and it's not a protocol
        // we support, it should be rejected.
        // (TODO: ! - add some way of keeping track of supported protocols!)
        //if (ds.getGlobalId() != null && !ds.getProtocol().equals(settingsService.getValueForKey(SettingsServiceBean.Key.Protocol, ""))) {
        //    throw new ImportException("Could not register id " + ds.getGlobalId() + ", protocol not supported");
        //}
        ds.setOwner(owner);
        ds.getLatestVersion().setDatasetFields(ds.getLatestVersion().initDatasetFields());

        // Check data against required constraints
        List<ConstraintViolation<DatasetField>> violations = ds.getVersions().get(0).validateRequired();
        if (!violations.isEmpty()) {
            // For migration and harvest, add NA for missing required values
            for (ConstraintViolation<DatasetField> v : violations) {
                DatasetField f = v.getRootBean();
                f.setSingleValue(DatasetField.NA_VALUE);
            }
        }

        // Check data against validation constraints
        // If we are migrating and "scrub migration data" is true we attempt to fix invalid data
        // if the fix fails stop processing of this file by throwing exception
        Set<ConstraintViolation> invalidViolations = ds.getVersions().get(0).validate();
        ValidatorFactory factory = Validation.buildDefaultValidatorFactory();
        Validator validator = factory.getValidator();
        if (!invalidViolations.isEmpty()) {
            for (ConstraintViolation<DatasetFieldValue> v : invalidViolations) {
                DatasetFieldValue f = v.getRootBean();
                boolean fixed = false;
                boolean converted = false;
                // TODO: Is this scrubbing something we want to continue doing? 
                if (settingsService.isTrueForKey(SettingsServiceBean.Key.ScrubMigrationData, false)) {
                    fixed = processMigrationValidationError(f, cleanupLog, metadataFile.getName());
                    converted = true;
                    if (fixed) {
                        Set<ConstraintViolation<DatasetFieldValue>> scrubbedViolations = validator.validate(f);
                        if (!scrubbedViolations.isEmpty()) {
                            fixed = false;
                        }
                    }
                }
                if (!fixed) {
                    String msg = "Data modified - File: " + metadataFile.getName() + "; Field: "
                            + f.getDatasetField().getDatasetFieldType().getDisplayName() + "; "
                            + "Invalid value:  '" + f.getValue() + "'" + " Converted Value:'"
                            + DatasetField.NA_VALUE + "'";
                    cleanupLog.println(msg);
                    f.setValue(DatasetField.NA_VALUE);

                }
            }
        }

        // A Global ID is required, in order for us to be able to harvest and import
        // this dataset:
        if (StringUtils.isEmpty(ds.getGlobalId())) {
            throw new ImportException("The harvested metadata record with the OAI server identifier "
                    + harvestIdentifier
                    + " does not contain a global unique identifier that we could recognize, skipping.");
        }

        ds.setHarvestedFrom(harvestingClient);
        ds.setHarvestIdentifier(harvestIdentifier);

        Dataset existingDs = datasetService.findByGlobalId(ds.getGlobalId());

        if (existingDs != null) {
            // If this dataset already exists IN ANOTHER DATAVERSE
            // we are just going to skip it!
            if (existingDs.getOwner() != null && !owner.getId().equals(existingDs.getOwner().getId())) {
                throw new ImportException("The dataset with the global id " + ds.getGlobalId()
                        + " already exists, in the dataverse " + existingDs.getOwner().getAlias()
                        + ", skipping.");
            }
            // And if we already have a dataset with this same id, in this same
            // dataverse, but it is  LOCAL dataset (can happen!), we're going to 
            // skip it also: 
            if (!existingDs.isHarvested()) {
                throw new ImportException("A LOCAL dataset with the global id " + ds.getGlobalId()
                        + " already exists in this dataverse; skipping.");
            }
            // For harvested datasets, there should always only be one version.
            // We will replace the current version with the imported version.
            if (existingDs.getVersions().size() != 1) {
                throw new ImportException("Error importing Harvested Dataset, existing dataset has "
                        + existingDs.getVersions().size() + " versions");
            }
            // Purge all the SOLR documents associated with this client from the 
            // index server: 
            indexService.deleteHarvestedDocuments(existingDs);
            // files from harvested datasets are removed unceremoniously, 
            // directly in the database. no need to bother calling the 
            // DeleteFileCommand on them.
            for (DataFile harvestedFile : existingDs.getFiles()) {
                DataFile merged = em.merge(harvestedFile);
                em.remove(merged);
                harvestedFile = null;
            }
            // TODO: 
            // Verify what happens with the indexed files in SOLR? 
            // are they going to be overwritten by the reindexing of the dataset?
            existingDs.setFiles(null);
            Dataset merged = em.merge(existingDs);
            engineSvc.submit(new DestroyDatasetCommand(merged, dataverseRequest));
            importedDataset = engineSvc
                    .submit(new CreateDatasetCommand(ds, dataverseRequest, false, ImportType.HARVEST));

        } else {
            importedDataset = engineSvc
                    .submit(new CreateDatasetCommand(ds, dataverseRequest, false, ImportType.HARVEST));
        }

    } catch (JsonParseException | ImportException | CommandException ex) {
        logger.fine("Failed to import harvested dataset: " + ex.getClass() + ": " + ex.getMessage());
        FileOutputStream savedJsonFileStream = new FileOutputStream(
                new File(metadataFile.getAbsolutePath() + ".json"));
        byte[] jsonBytes = json.getBytes();
        int i = 0;
        while (i < jsonBytes.length) {
            int chunkSize = i + 8192 <= jsonBytes.length ? 8192 : jsonBytes.length - i;
            savedJsonFileStream.write(jsonBytes, i, chunkSize);
            i += chunkSize;
            savedJsonFileStream.flush();
        }
        savedJsonFileStream.close();
        logger.info("JSON produced saved in " + metadataFile.getAbsolutePath() + ".json");
        throw new ImportException(
                "Failed to import harvested dataset: " + ex.getClass() + " (" + ex.getMessage() + ")", ex);
    }
    return importedDataset;
}

From source file:dk.dma.msinm.service.MessageService.java

/**
 * Updates the given message
 *
 * @param messageVo the template for the message to update
 * @return the updated message
 */
@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
public Message updateMessage(MessageVo messageVo) throws Exception {

    Message message = messageVo.toEntity();
    final Message original = getByPrimaryKey(Message.class, message.getId());

    // Validate the message
    SeriesIdentifier id = message.getSeriesIdentifier();
    if (message.getId() == null) {
        throw new Exception("Message not an existing message");
    }
    if (message.getType().getSeriesIdType() != id.getMainType()) {
        throw new Exception("Invalid Message type");
    }
    if (message.getValidFrom() == null) {
        throw new Exception("Message validFrom must be specified");
    }

    original.setSeriesIdentifier(message.getSeriesIdentifier());
    original.setType(message.getType());
    original.setCancellationDate(message.getCancellationDate());
    original.setHorizontalDatum(message.getHorizontalDatum());
    original.setStatus(message.getStatus());
    original.setOriginalInformation(message.isOriginalInformation());
    original.setPriority(message.getPriority());
    original.setValidFrom(message.getValidFrom());
    original.setValidTo(message.getValidTo());

    // Copy the area data
    original.copyDescsAndRemoveBlanks(message.getDescs());

    // Add the locations
    original.getLocations().clear();
    original.getLocations().addAll(message.getLocations());

    // Add the light list numbers
    original.getLightsListNumbers().clear();
    original.getLightsListNumbers().addAll(message.getLightsListNumbers());

    // Add the references
    original.getReferences().clear();
    message.getReferences().forEach(ref -> original.getReferences()
            .add(ref.isNew() ? ref : getByPrimaryKey(Reference.class, ref.getId())));

    // Copy the Area
    original.setArea(null);
    if (message.getArea() != null) {
        original.setArea(getByPrimaryKey(Area.class, message.getArea().getId()));
    }

    // Copy the Categories
    original.getCategories().clear();
    message.getCategories()
            .forEach(cat -> original.getCategories().add(getByPrimaryKey(Category.class, cat.getId())));

    // Substitute the Charts with the persisted ones
    original.getCharts().clear();
    message.getCharts().forEach(chart -> original.getCharts().add(getByPrimaryKey(Chart.class, chart.getId())));

    // Update the publications
    final Message msg = message;
    original.getPublications().forEach(pub -> pub.copyData(msg.getPublication(pub.getType())));
    // And let publishers have a say
    publishingService.updateMessage(message);

    // Persist the message
    message = saveMessage(original);
    log.info("Updated message " + message);

    em.flush();
    return message;
}

From source file:org.rhq.enterprise.server.measurement.MeasurementDataManagerBean.java

/**
 * Add metrics data to the database. Data that is passed can come from several Schedules, but needs to be of only
 * one type of MeasurementGathering. For good performance it is important that the agent sends batches as big as
 * possible (ok, perhaps not more than 100 items at a time).
 *
 * @param data the actual data points
 */
@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
public void addNumericData(Set<MeasurementDataNumeric> data) {
    if ((data == null) || (data.isEmpty())) {
        return;
    }

    int expectedCount = data.size();

    Connection conn = null;
    DatabaseType dbType = null;

    Map<String, PreparedStatement> statements = new HashMap<String, PreparedStatement>();

    try {
        conn = rhqDs.getConnection();
        dbType = DatabaseTypeFactory.getDatabaseType(conn);

        if (dbType instanceof Postgresql83DatabaseType) {
            Statement st = null;
            try {
                // Take advantage of async commit here
                st = conn.createStatement();
                st.execute("SET synchronous_commit = off");
            } finally {
                JDBCUtil.safeClose(st);
            }
        }

        for (MeasurementDataNumeric aData : data) {
            Double value = aData.getValue();
            if ((value == null) || Double.isNaN(value) || Double.isInfinite(value)) {
                expectedCount--;
                continue;
            }

            String table = MeasurementDataManagerUtility.getTable(aData.getTimestamp());

            PreparedStatement ps = statements.get(table);

            if (ps == null) {
                String insertSql = "INSERT  /*+ APPEND */ INTO " + table
                        + "(schedule_id,time_stamp,value) VALUES(?,?,?)";
                ps = conn.prepareStatement(insertSql);
                statements.put(table, ps);
            }

            ps.setInt(1, aData.getScheduleId());
            ps.setLong(2, aData.getTimestamp());
            ps.setDouble(3, value);
            ps.addBatch();
        }

        int count = 0;
        for (PreparedStatement ps : statements.values()) {
            int[] res = ps.executeBatch();
            for (int updates : res) {
                if ((updates != 1) && (updates != -2)) // oracle returns -2 on success
                {
                    throw new MeasurementStorageException("Unexpected batch update size [" + updates + "]");
                }

                count++;
            }
        }

        if (count != expectedCount) {
            throw new MeasurementStorageException("Failure to store measurement data.");
        }

        notifyAlertConditionCacheManager("mergeMeasurementReport",
                data.toArray(new MeasurementData[data.size()]));
    } catch (SQLException e) {
        log.warn("Failure saving measurement numeric data:\n" + ThrowableUtil.getAllMessages(e));
    } catch (Exception e) {
        log.error("Error persisting numeric data", e);
    } finally {
        for (PreparedStatement ps : statements.values()) {
            JDBCUtil.safeClose(ps);
        }

        JDBCUtil.safeClose(conn);
    }
}

From source file:org.rhq.enterprise.server.cloud.StatusManagerBean.java

@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
public void updateByAgent(int agentId) {
    log.debug("About to mark status by agent");
    Query updateAgentQuery = entityManager.createNamedQuery(Agent.QUERY_UPDATE_STATUS_BY_AGENT);
    updateAgentQuery.setParameter("agentId", agentId);
    updateAgentQuery.executeUpdate();

    /*
     * this is informational debugging only - do NOT change the status bits here
     */
    if (log.isDebugEnabled()) {
        Agent agent = entityManager.find(Agent.class, agentId);
        log.debug("Marking status, agent[id=" + agent.getId() + ", status=" + agent.getStatus() + "]");
    }
}

From source file:com.flexive.ejb.beans.configuration.DivisionConfigurationEngineBean.java

/**
 * {@inheritDoc}
 */
@Override
@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
public void patchDatabase() throws FxApplicationException {
    final long oldVersion = get(SystemParameters.DB_VERSION);
    final long patchedVersion = performPatching();
    if (patchedVersion != oldVersion) {
        modifyDatabaseVersion(patchedVersion);
    }
}

From source file:org.rhq.enterprise.server.cloud.StatusManagerBean.java

@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
public void updateByAutoBaselineCalculationJob() {
    log.debug("About to mark status by autoBaselineCalculationJob");
    // baselines refer to measurement-based alert conditions, thus only agent statuses need to be set
    Query updateAgentQuery = entityManager.createNamedQuery(Agent.QUERY_UPDATE_STATUS_FOR_ALL);
    updateAgentQuery.executeUpdate();

    /*
     * this is informational debugging only - do NOT change the status bits here
     */
    if (log.isDebugEnabled()) {
        List<Agent> agents = agentManager.getAllAgents();
        for (Agent agent : agents) {
            log.debug("Marking status, agent[id=" + agent.getId() + ", status=" + agent.getStatus()
                    + "] for AutoBaselineCalculationJob");
        }
    }
}

From source file:org.rhq.enterprise.server.event.EventManagerBean.java

@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
@TransactionTimeout(6 * 60 * 60)
public int purgeEventData(Date deleteUpToTime) throws SQLException {
    Query q = entityManager.createQuery("DELETE FROM Event e WHERE e.timestamp < :cutOff");
    q.setParameter("cutOff", deleteUpToTime.getTime());
    long startTime = System.currentTimeMillis();
    int deleted = q.executeUpdate();
    MeasurementMonitor.getMBean().incrementPurgeTime(System.currentTimeMillis() - startTime);
    MeasurementMonitor.getMBean().setPurgedEvents(deleted);
    return deleted;
}