Example usage for javax.ejb TransactionAttributeType REQUIRES_NEW

List of usage examples for javax.ejb TransactionAttributeType REQUIRES_NEW

Introduction

On this page you can find examples of how javax.ejb TransactionAttributeType.REQUIRES_NEW is used in open source projects.

Prototype

TransactionAttributeType REQUIRES_NEW

Document

The container must invoke an enterprise bean method whose transaction attribute is set to REQUIRES_NEW with a new transaction context.
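Before the usage examples, here is a minimal sketch of what the attribute means in practice (the bean and method names are hypothetical, not taken from the examples below): when a transactional caller invokes the annotated method, the container suspends the caller's transaction, runs the method in a brand-new transaction that commits or rolls back on its own, and then resumes the caller's transaction.

import javax.ejb.Stateless;
import javax.ejb.TransactionAttribute;
import javax.ejb.TransactionAttributeType;

@Stateless
public class AuditBean {

    // Runs in its own transaction: the caller's transaction (if any) is
    // suspended for the duration of this call, and the work done here
    // commits or rolls back independently of the caller.
    @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
    public void recordEvent(String event) {
        // persist an audit record here; once this method returns, the
        // record is committed even if the caller's transaction later
        // rolls back
    }
}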

Usage

From source file:org.rhq.enterprise.server.resource.group.ResourceGroupManagerBean.java

@RequiredPermission(Permission.MANAGE_INVENTORY)
@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
public void enableRecursivityForGroup(Subject subject, int groupId)
        throws ResourceGroupNotFoundException, ResourceGroupUpdateException {

    // step 1: clear the implicit resources, in preparation for adding a different set of resources to the group
    clearImplicitResources(groupId);

    // step 2: prepare the list of resources to be used to pass to the method that does the recursive logic
    List<Integer> explicitResourceIdList = resourceManager.findExplicitResourceIdsByResourceGroup(groupId);

    // step 3: add the explicit resources back, this time with the recursive bit flipped on
    addResourcesToGroupImplicit(subject, groupId, explicitResourceIdList, false, true);
}
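A note on the design: because the method runs in its own transaction, the clear-and-rebuild of the group's implicit resources commits or rolls back as a unit, independently of whatever transaction the caller may have open. This rationale is inferred from the annotation; the RHQ source does not state it explicitly.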

From source file:org.rhq.enterprise.server.alert.AlertManagerBean.java

/**
 * Remove alerts for the specified range of time.
 */
// uses a bulk delete; make sure we are in a new tx so we don't corrupt the caller's Hibernate session
@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
@TransactionTimeout(6 * 60 * 60)
public int deleteAlerts(long beginTime, long endTime) {
    long totalTime = 0;

    long start = System.currentTimeMillis();
    Query query = entityManager.createNamedQuery(AlertConditionLog.QUERY_DELETE_BY_ALERT_CTIME);
    query.setParameter("begin", beginTime);
    query.setParameter("end", endTime);
    int conditionsDeleted = query.executeUpdate();
    long end = System.currentTimeMillis();
    log.debug("Deleted [" + conditionsDeleted + "] alert condition logs in [" + (end - start) + "]ms");
    totalTime += (end - start);

    start = System.currentTimeMillis();
    query = entityManager.createNamedQuery(AlertNotificationLog.QUERY_DELETE_BY_ALERT_CTIME);
    query.setParameter("begin", beginTime);
    query.setParameter("end", endTime);
    int deletedNotifications = query.executeUpdate();
    end = System.currentTimeMillis();
    log.debug("Deleted [" + deletedNotifications + "] alert notifications in [" + (end - start) + "]ms");
    totalTime += (end - start);

    start = System.currentTimeMillis();
    query = entityManager.createNamedQuery(Alert.QUERY_DELETE_BY_CTIME);
    query.setParameter("begin", beginTime);
    query.setParameter("end", endTime);
    int deletedAlerts = query.executeUpdate();
    end = System.currentTimeMillis();
    log.debug("Deleted [" + deletedAlerts + "] alerts in [" + (end - start) + "]ms");
    totalTime += (end - start);

    MeasurementMonitor.getMBean().incrementPurgeTime(totalTime);
    MeasurementMonitor.getMBean().setPurgedAlerts(deletedAlerts);
    MeasurementMonitor.getMBean().setPurgedAlertConditions(conditionsDeleted);
    MeasurementMonitor.getMBean().setPurgedAlertNotifications(deletedNotifications);
    log.debug("Deleted [" + (deletedAlerts + conditionsDeleted + deletedNotifications) + "] "
            + "alert audit records in [" + (totalTime) + "]ms");

    return deletedAlerts;
}
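Two details are worth noting here. First, JPA bulk operations (executeUpdate on a DELETE query) bypass the persistence context, so entities the caller has already loaded can go stale; running the bulk delete in a new transaction, with its own persistence context, is exactly what the in-code comment is guarding against. Second, transaction attributes only apply to calls that go through the container's proxy, so code in the same bean must invoke deleteAlerts via an injected reference rather than this. A minimal sketch, with hypothetical names (AlertManagerLocal is assumed to be this bean's business interface):

import javax.ejb.EJB;
import javax.ejb.Stateless;

@Stateless
public class AlertPurgeJob {

    // Injecting the bean's own business interface yields the container
    // proxy; calling deleteAlerts(...) through it honors REQUIRES_NEW.
    // A direct this.deleteAlerts(...) call would bypass the proxy and
    // run in the caller's transaction instead.
    @EJB
    private AlertManagerLocal alertManager;

    public void purgeRange(long beginTime, long endTime) {
        int deleted = alertManager.deleteAlerts(beginTime, endTime);
        // the delete above ran, and committed, in its own transaction
    }
}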

From source file:org.niord.core.message.MessageService.java

/**
 * Updates the status of the given message
 *
 * @param uid    the UID of the message
 * @param status the new status to transition to
 */
@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
public Message updateStatus(String uid, Status status) throws Exception {
    Date now = new Date();
    Message message = findByUid(uid);
    Status prevStatus = message.getStatus();

    // Check that a valid status transition is requested
    if (!getValidStatusTransitions(prevStatus).contains(status)) {
        throw new Exception("Invalid status transition " + prevStatus + " -> " + status);
    }

    // Register who last updated the message
    message.setLastUpdatedBy(userService.currentUser());

    // Update the status
    message.setStatus(status);

    // When published, update dates and the message series
    if (prevStatus.isDraft() && status == Status.PUBLISHED) {

        // Update the publish date if it needs updating
        if (message.getPublishDateFrom() == null || message.getPublishDateFrom().after(now)) {
            message.setPublishDateFrom(now);
        }

        // If no event dates are defined, add event dates based on publish start date
        message.checkEventDateIntervalsUponPublishStart();

        // Assign a new message number and short ID
        messageSeriesService.updateMessageIdsFromMessageSeries(message, true);

    } else if (status == Status.CANCELLED || status == Status.EXPIRED) {

        // Update the publish date if it needs updating
        if (message.getPublishDateTo() == null || message.getPublishDateTo().before(now)) {
            message.setPublishDateTo(now);
        }

        // Update or remove open-ended event date intervals based on the publish end date.
        message.checkEventDateIntervalsUponPublishEnd();
    }

    // Add or remove the message from any message-recording publication message tags
    publicationService.updateRecordingPublications(message, prevStatus);

    message = saveMessage(message);

    // Broadcast the status change to any listener
    sendStatusUpdate(message, prevStatus);

    return message;
}
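Because the method starts its own transaction, the status change and all of its side effects (publish dates, message IDs, publication tags, the broadcast) commit as a single unit regardless of what the calling transaction later does. That rationale is inferred from the annotation; the Niord source does not spell it out.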

From source file:edu.harvard.iq.dvn.core.study.StudyFileServiceBean.java

@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
public void addIngestedFiles(Long studyId, String versionNote, List fileBeans, Long userId) {
    // if no files, then just return
    if (fileBeans.isEmpty()) {
        return;
    }

    // first some initialization
    StudyVersion studyVersion = null;
    Study study = null;
    MD5Checksum md5Checksum = new MD5Checksum();

    study = em.find(Study.class, studyId);
    studyVersion = study.getEditVersion();
    if (studyVersion.getId() == null) {
        em.persist(studyVersion);
        em.flush();
    }

    studyVersion.setVersionNote(versionNote);

    VDCUser user = userService.find(userId);

    File newDir = new File(FileUtil.getStudyFileDir(),
            study.getAuthority() + File.separator + study.getStudyId());
    if (!newDir.exists()) {
        newDir.mkdirs();
    }

    // now iterate through fileBeans
    Iterator iter = fileBeans.iterator();
    while (iter.hasNext()) {
        StudyFileEditBean fileBean = (StudyFileEditBean) iter.next();

        // for now the logic is if the DSB does not return a file, don't copy
        // over anything; this is to cover the situation with the Ingest servlet
        // that takes a control card file to add a dataTable to a preexisting
        // file; this will have to change if we do this two-files method at the
        // time of the original upload
        // (TODO: figure out what this comment means - ? - L.A.)
        // (is this some legacy thing? - it's talking about "ingest servlet"...)
        // (did we ever have a mechanism for adding a data table to an existing
        //  tab file?? - that's actually kinda cool)

        StudyFile f = fileBean.getStudyFile();

        // So, if there is a file: let's move it to its final destination
        // in the study directory. 
        //
        // First, if it's a subsettable or network file, or any other
        // kind that potentially gets transformed on ingest:

        File newIngestedLocationFile = null;

        if (fileBean.getIngestedSystemFileLocation() != null) {

            String originalFileType = f.getFileType();

            // 1. move ingest-created file:

            File tempIngestedFile = new File(fileBean.getIngestedSystemFileLocation());
            newIngestedLocationFile = new File(newDir, f.getFileSystemName());
            try {
                FileUtil.copyFile(tempIngestedFile, newIngestedLocationFile);
                tempIngestedFile.delete();
                if (f instanceof TabularDataFile) {
                    f.setFileType("text/tab-separated-values");
                }
                f.setFileSystemLocation(newIngestedLocationFile.getAbsolutePath());

            } catch (IOException ex) {
                throw new EJBException(ex);
            }
            // 1b. If this is a NetworkDataFile,  move the SQLite file from the temp Ingested location to the system location
            if (f instanceof NetworkDataFile) {
                File tempSQLDataFile = new File(tempIngestedFile.getParent(), FileUtil
                        .replaceExtension(tempIngestedFile.getName(), NetworkDataServiceBean.SQLITE_EXTENSION));
                File newSQLDataFile = new File(newDir,
                        f.getFileSystemName() + "." + NetworkDataServiceBean.SQLITE_EXTENSION);

                File tempNeo4jDir = new File(tempIngestedFile.getParent(), FileUtil
                        .replaceExtension(tempIngestedFile.getName(), NetworkDataServiceBean.NEO4J_EXTENSION));
                File newNeo4jDir = new File(newDir,
                        f.getFileSystemName() + "." + NetworkDataServiceBean.NEO4J_EXTENSION);

                try {
                    FileUtil.copyFile(tempSQLDataFile, newSQLDataFile);
                    FileUtils.copyDirectory(tempNeo4jDir, newNeo4jDir);
                    tempSQLDataFile.delete();
                    FileUtils.deleteDirectory(tempNeo4jDir);
                    f.setOriginalFileType(originalFileType);

                } catch (IOException ex) {
                    throw new EJBException(ex);
                }
            }

            // 2. also move original file for archiving
            File tempOriginalFile = new File(fileBean.getTempSystemFileLocation());
            File newOriginalLocationFile = new File(newDir, "_" + f.getFileSystemName());
            try {
                if (fileBean.getControlCardSystemFileLocation() != null
                        && fileBean.getControlCardType() != null) {
                    // 2a. For the control card-based ingests (SPSS and DDI), we save
                    // a zipped bundle of both the card and the raw data file
                    // (TAB-delimited or CSV):

                    FileInputStream instream = null;
                    byte[] dataBuffer = new byte[8192];

                    ZipOutputStream zout = new ZipOutputStream(new FileOutputStream(newOriginalLocationFile));

                    // First, the control card:

                    File controlCardFile = new File(fileBean.getControlCardSystemFileLocation());

                    ZipEntry ze = new ZipEntry(controlCardFile.getName());
                    instream = new FileInputStream(controlCardFile);
                    zout.putNextEntry(ze);

                    int k = 0;
                    while ((k = instream.read(dataBuffer)) > 0) {
                        zout.write(dataBuffer, 0, k);
                        zout.flush();
                    }

                    instream.close();

                    // And then, the data file:

                    ze = new ZipEntry(tempOriginalFile.getName());
                    instream = new FileInputStream(tempOriginalFile);
                    zout.putNextEntry(ze);

                    while ((k = instream.read(dataBuffer)) > 0) {
                        zout.write(dataBuffer, 0, k);
                        zout.flush();
                    }

                    instream.close();

                    zout.close();

                    // and control card file can be deleted now:
                    controlCardFile.delete();

                    // Mime types: 
                    // These are custom, made-up types, used to identify the 
                    // type of the source data:

                    if (fileBean.getControlCardType().equals("spss")) {
                        f.setOriginalFileType("application/x-dvn-csvspss-zip");
                    } else if (fileBean.getControlCardType().equals("ddi")) {
                        f.setOriginalFileType("application/x-dvn-tabddi-zip");
                    } else {
                        logger.info("WARNING: unknown control card-based Ingest type? -- "
                                + fileBean.getControlCardType());
                        f.setOriginalFileType(originalFileType);
                    }
                    f.setMd5(md5Checksum.CalculateMD5(tempOriginalFile.getAbsolutePath()));

                } else {
                    // 2b. Otherwise, simply store the data that was used for
                    // ingest as the original:

                    FileUtil.copyFile(tempOriginalFile, newOriginalLocationFile);
                    f.setOriginalFileType(originalFileType);
                    f.setMd5(md5Checksum.CalculateMD5(newOriginalLocationFile.getAbsolutePath()));
                }
                tempOriginalFile.delete();
            } catch (IOException ex) {
                throw new EJBException(ex);
            }
        } else if (f instanceof SpecialOtherFile) {
            // "Special" OtherFiles are still OtherFiles; we just add the file
            // uploaded by the user to the study as is:

            File tempIngestedFile = new File(fileBean.getTempSystemFileLocation());
            newIngestedLocationFile = new File(newDir, f.getFileSystemName());
            try {
                FileUtil.copyFile(tempIngestedFile, newIngestedLocationFile);
                tempIngestedFile.delete();
                f.setFileSystemLocation(newIngestedLocationFile.getAbsolutePath());
                f.setMd5(md5Checksum.CalculateMD5(newIngestedLocationFile.getAbsolutePath()));
            } catch (IOException ex) {
                throw new EJBException(ex);
            }
        }

        // Finally, if the file was copied successfully,
        // attach the file to the study version and study

        if (newIngestedLocationFile != null && newIngestedLocationFile.exists()) {

            fileBean.getFileMetadata().setStudyVersion(studyVersion);
            studyVersion.getFileMetadatas().add(fileBean.getFileMetadata());
            fileBean.getStudyFile().setStudy(study);
            // don't need to set study side, since we're no longer using persistence cache
            //study.getStudyFiles().add(fileBean.getStudyFile());
            //fileBean.addFiletoStudy(study);

            em.persist(fileBean.getStudyFile());
            em.persist(fileBean.getFileMetadata());

        } else {
            //fileBean.getStudyFile().setSubsettable(true);
            em.merge(fileBean.getStudyFile());
        }
    }
    // calculate the UNF for the study version
    try {
        studyVersion.getMetadata().setUNF(new DSBWrapper().calculateUNF(studyVersion));
    } catch (IOException e) {
        throw new EJBException("Could not calculate new study UNF");
    }

    studyService.saveStudyVersion(studyVersion, user.getId());
}
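The new transaction isolates a potentially long batch of file moves and persistence operations from the caller's context. One general caveat, not specific to this source: the file copies themselves are not transactional, so if the transaction rolls back after an EJBException, files already moved on disk would need separate cleanup.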

From source file:org.rhq.enterprise.server.resource.ResourceManagerBean.java

@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
public void uninventoryResourceAsyncWork(Subject user, int resourceId) {
    if (!authorizationManager.isOverlord(user)) {
        throw new IllegalArgumentException(
                "Only the overlord can execute the out-of-band async resource delete method");
    }

    /*
     * even though the group removal occurs in the in-band work, there can be some group definitions that just
     * happen to perform their recalculation (either manually or scheduled) in the period after the in-band work
     * completes but before the async job triggers. since the ExpressionEvaluator that underlies the bulk of the
     * dynagroup query generations automatically adds a filter to only manipulate COMMITTED resources, this work
     * should be a no-op most of the time.  however, in rare circumstances it's possible for an InventoryReport to
     * come across the wire and flip the status of resources from UNINVENTORIED back to COMMITTED.  in this case,
     * this group removal logic needs to be executed again just prior to removing the rest of the resource history.
     */
    boolean hasErrors = uninventoryResourcesBulkDelete(user, Arrays.asList(resourceId));
    if (hasErrors) {
        return; // return early if there were any errors, because we can't remove the resource yet
    }

    hasErrors = uninventoryResourceBulkDeleteAsyncWork(user, resourceId);
    if (hasErrors) {
        return; // return early if there were any errors, because we can't remove the resource yet
    }

    Resource attachedResource = entityManager.find(Resource.class, resourceId);
    if (log.isDebugEnabled()) {
        log.debug("Overlord is asynchronously deleting resource [" + attachedResource + "]");
    }

    // our unidirectional one-to-many mapping of drift definitions makes it impossible to easily bulk delete
    // them, so clear them here and let the cascading of delete_orphan do the work
    // (guard against null, since the null check below implies the resource may already be gone)
    if (attachedResource != null && attachedResource.getDriftDefinitions() != null) {
        attachedResource.getDriftDefinitions().clear();
    }

    // one more thing, delete any autogroup backing groups
    if (attachedResource != null) {
        List<ResourceGroup> backingGroups = attachedResource.getAutoGroupBackingGroups();
        if (null != backingGroups && !backingGroups.isEmpty()) {
            int size = backingGroups.size();
            int[] backingGroupIds = new int[size];
            for (int i = 0; (i < size); ++i) {
                backingGroupIds[i] = backingGroups.get(i).getId();
            }
            try {
                resourceGroupManager.deleteResourceGroups(user, backingGroupIds);
            } catch (Throwable t) {
                if (log.isDebugEnabled()) {
                    log.error("Bulk delete error for autogroup backing group deletion for " + backingGroupIds,
                            t);
                } else {
                    log.error("Bulk delete error for autogroup backing group deletion for " + backingGroupIds
                            + ": " + t.getMessage());
                }
            }
        }
    }

    // now we can purge the resource, let cascading do the rest
    entityManager.remove(attachedResource);

    return;
}
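The pattern here is a two-phase uninventory: quick in-band work runs in the caller's transaction, and this REQUIRES_NEW method does the heavy bulk deletes out of band, with the early returns leaving the resource in place for a later retry if any bulk step fails. The method's own comment block describes the dynagroup recalculation race this guards against.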

From source file:org.rhq.enterprise.server.alert.AlertManagerBean.java

@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
@TransactionTimeout(6 * 60 * 60)
public int purgeAlerts() {
    long totalTime = 0;

    Connection conn = null;
    PreparedStatement truncateConditionLogsStatement = null;
    PreparedStatement truncateNotificationLogsStatement = null;
    PreparedStatement truncateAlertsStatement = null;
    try {
        conn = rhqDs.getConnection();

        truncateConditionLogsStatement = conn.prepareStatement(AlertConditionLog.QUERY_NATIVE_TRUNCATE_SQL);
        truncateNotificationLogsStatement = conn
                .prepareStatement(AlertNotificationLog.QUERY_NATIVE_TRUNCATE_SQL);
        truncateAlertsStatement = conn.prepareStatement(Alert.QUERY_NATIVE_TRUNCATE_SQL);

        long start = System.currentTimeMillis();
        int purgedConditions = truncateConditionLogsStatement.executeUpdate();
        long end = System.currentTimeMillis();
        log.debug("Purged [" + purgedConditions + "] alert condition logs in [" + (end - start) + "]ms");
        totalTime += (end - start);

        start = System.currentTimeMillis();
        int purgedNotifications = truncateNotificationLogsStatement.executeUpdate();
        end = System.currentTimeMillis();
        log.debug("Purged [" + purgedNotifications + "] alert notifications in [" + (end - start) + "]ms");
        totalTime += (end - start);

        start = System.currentTimeMillis();
        int purgedAlerts = truncateAlertsStatement.executeUpdate();
        end = System.currentTimeMillis();
        log.debug("Purged [" + purgedAlerts + "] alerts in [" + (end - start) + "]ms");
        totalTime += (end - start);

        MeasurementMonitor.getMBean().incrementPurgeTime(totalTime);
        MeasurementMonitor.getMBean().setPurgedAlerts(purgedAlerts);
        MeasurementMonitor.getMBean().setPurgedAlertConditions(purgedConditions);
        MeasurementMonitor.getMBean().setPurgedAlertNotifications(purgedNotifications);
        log.debug("Deleted [" + (purgedAlerts + purgedConditions + purgedNotifications) + "] "
                + "alert audit records in [" + (totalTime) + "]ms");

        return purgedAlerts;
    } catch (SQLException sqle) {
        log.error("Error purging alerts", sqle);
        throw new RuntimeException("Error purging alerts: " + sqle.getMessage());
    } finally {
        JDBCUtil.safeClose(truncateConditionLogsStatement);
        JDBCUtil.safeClose(truncateNotificationLogsStatement);
        JDBCUtil.safeClose(truncateAlertsStatement);
        JDBCUtil.safeClose(conn);
    }
}
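Two details worth calling out: @TransactionTimeout(6 * 60 * 60) is a JBoss-specific annotation whose value is in seconds, so the purge is allowed up to six hours; and on some databases TRUNCATE is DDL that commits implicitly, which is an extra reason to keep it isolated in its own transaction. The second point is a general observation, not one the RHQ comments make.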

From source file:com.flexive.ejb.beans.configuration.DivisionConfigurationEngineBean.java

/**
 * {@inheritDoc}
 */
@Override
@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
public void createFlatStorage(String name, FxFlatStorageInfo.Type storageType, String description,
        int stringColumns, int textColumns, int bigIntColumns, int doubleColumns, int selectColumns)
        throws FxApplicationException {
    try {
        Connection con = null;
        try {
            con = Database.getNonTXDataSource().getConnection();
            FxFlatStorageManager.getInstance().createFlatStorage(con, name, storageType, description,
                    stringColumns, textColumns, bigIntColumns, doubleColumns, selectColumns);
        } catch (FxApplicationException e) {
            EJBUtils.rollback(ctx);
            throw e;
        } finally {
            Database.closeObjects(DivisionConfigurationEngineBean.class, con, null);
        }
    } catch (SQLException e) {
        EJBUtils.rollback(ctx);
        throw new FxDbException(e, "ex.db.sqlError", e.getMessage());
    }
}
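Note the combination here: the flat-storage DDL runs on a connection from the non-transactional data source, since DDL on many databases cannot participate in a JTA transaction, while EJBUtils.rollback(ctx) marks the surrounding REQUIRES_NEW transaction for rollback on failure so that any transactional bookkeeping is undone without touching the caller's transaction.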

From source file:org.rhq.enterprise.server.measurement.MeasurementBaselineManagerBean.java

@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
public MeasurementBaseline calculateAutoBaselineForGroupInNewTransaction(Subject subject, int groupId,
        int definitionId, long startDate, long endDate, boolean save)
        throws BaselineCreationException, MeasurementNotFoundException {

    if (save && !authorizationManager.hasGroupPermission(subject, Permission.MANAGE_MEASUREMENTS, groupId)) {
        throw new PermissionException("User[" + subject.getName()
                + "] does not have permission to calculate and set baselines for group[id=" + groupId + "]");
    }

    MeasurementBaseline baseline;
    try {
        baseline = calculateBaselineForGroup(groupId, definitionId, true, startDate, endDate, save);
        if (save) {
            // We have changed the baseline information for the schedule, so remove the now outdated OOB info.
            oobManager.removeOOBsForGroupAndDefinition(subject, groupId, definitionId);
        }
    } catch (DataNotAvailableException e) {
        throw new BaselineCreationException("Error fetching data for baseline calculation for group[id="
                + groupId + "], definition[id=" + definitionId + "]");
    }

    return baseline;
}
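As in the earlier RHQ examples, the new transaction lets the baseline calculation and the OOB cleanup commit independently of the caller. Note also that the permission check is gated on save: callers may compute a baseline without MANAGE_MEASUREMENTS as long as they do not persist it.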

From source file:org.rhq.enterprise.server.plugin.ServerPluginsBean.java

@RequiredPermission(Permission.MANAGE_SETTINGS)
@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
public void purgeServerPlugin(Subject subject, PluginKey pluginKey) {
    Query q = this.entityManager.createNamedQuery(ServerPlugin.QUERY_FIND_ANY_BY_NAME);
    q.setParameter("name", pluginKey.getPluginName());
    ServerPlugin doomed = (ServerPlugin) q.getSingleResult();

    // get a reference attached to the EntityManager and use em.remove; this cascade-deletes too.
    doomed = this.entityManager.getReference(ServerPlugin.class, doomed.getId());
    this.entityManager.remove(doomed);

    log.info("Server plugin [" + pluginKey + "] has been purged from the db");
    return;
}
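Two JPA details: getSingleResult() throws NoResultException if no plugin matches, and that exception propagates to the caller; em.getReference is then used so the entity can be removed, triggering its mapped cascade deletes, without fully loading its state.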

From source file:org.rhq.enterprise.server.alert.AlertDefinitionManagerBean.java

@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
public AlertDefinition updateAlertDefinition(Subject subject, int alertDefinitionId,
        AlertDefinition alertDefinition, boolean purgeInternals)
        throws InvalidAlertDefinitionException, AlertDefinitionUpdateException {
    if (purgeInternals) {
        alertDefinitionManager.purgeInternals(alertDefinitionId);
    }

    /*
     * ENABLE / DISABLE changes are caught by switching on the computed delta instead of calling out to
     * the enable/disable functions
     */
    AlertDefinition oldAlertDefinition = entityManager.find(AlertDefinition.class, alertDefinitionId);

    if (checkPermission(subject, oldAlertDefinition) == false) {
        if (oldAlertDefinition.getResourceType() != null) {
            throw new PermissionException("User [" + subject.getName()
                    + "] does not have permission to modify alert templates for type ["
                    + oldAlertDefinition.getResourceType() + "]");
        } else if (oldAlertDefinition.getResourceGroup() != null) {
            throw new PermissionException("User [" + subject.getName()
                    + "] does not have permission to modify alert definitions for group ["
                    + oldAlertDefinition.getResourceGroup() + "]");
        } else {
            throw new PermissionException("User [" + subject.getName()
                    + "] does not have permission to modify alert definitions for resource ["
                    + oldAlertDefinition.getResource() + "]");
        }
    }

    /*
     * only need to check the validity of the new alert definition if the authz checks pass *and* the old definition
     * is not currently deleted
     */
    boolean isResourceLevel = (oldAlertDefinition.getResource() != null);
    checkAlertDefinition(subject, alertDefinition,
            isResourceLevel ? oldAlertDefinition.getResource().getId() : null);

    /*
     * Should not be able to update an alert definition if the old alert definition is in an invalid state
     */
    if (oldAlertDefinition.getDeleted()) {
        throw new AlertDefinitionUpdateException(
                "Can not update deleted " + oldAlertDefinition.toSimpleString());
    }

    AlertDefinitionUpdateType updateType = AlertDefinitionUpdateType.get(oldAlertDefinition, alertDefinition);

    if (isResourceLevel && ((updateType == AlertDefinitionUpdateType.JUST_DISABLED)
            || (updateType == AlertDefinitionUpdateType.STILL_ENABLED))) {
        /*
         * if you were JUST_DISABLED or STILL_ENABLED, you are coming from the ENABLED state, which means you need
         * to be removed from the cache as the first half of this update
         */
        LOG.debug("Updating AlertConditionCacheManager with AlertDefinition[ id=" + oldAlertDefinition.getId()
                + " ]...DELETING");
        for (AlertCondition nextCondition : oldAlertDefinition.getConditions()) {
            LOG.debug("OldAlertCondition[ id=" + nextCondition.getId() + " ]");
        }
        notifyAlertConditionCacheManager(subject, "updateAlertDefinition", oldAlertDefinition,
                AlertDefinitionEvent.DELETED);
    }

    /*
     * performance optimization for the common case of single-condition alerts; it's easier for the
     * out-of-band process to check whether or not ANY conditions are true rather than ALL of them
     */
    if (alertDefinition.getConditions().size() == 1) {
        alertDefinition.setConditionExpression(BooleanExpression.ANY);
    }

    oldAlertDefinition.update(alertDefinition);
    if (LOG.isDebugEnabled()) {
        LOG.debug("Updating: " + oldAlertDefinition);
        for (AlertCondition nextCondition : oldAlertDefinition.getConditions()) {
            LOG.debug("Condition: " + nextCondition);
        }
        for (AlertNotification nextNotification : oldAlertDefinition.getAlertNotifications()) {
            LOG.debug("Notification: " + nextNotification);
            LOG.debug("Notification-Configuration: " + nextNotification.getConfiguration().toString(true));
            if (nextNotification.getExtraConfiguration() != null) {
                LOG.debug("Notification-Extra-Configuration: "
                        + nextNotification.getExtraConfiguration().toString(true));
            }
        }
    }

    fixRecoveryId(oldAlertDefinition);
    oldAlertDefinition.setMtime(System.currentTimeMillis());

    AlertDefinition newAlertDefinition = entityManager.merge(oldAlertDefinition);

    if (isResourceLevel && ((updateType == AlertDefinitionUpdateType.JUST_ENABLED)
            || (updateType == AlertDefinitionUpdateType.STILL_ENABLED))) {
        /*
         * if you were JUST_ENABLED or STILL_ENABLED, you are moving to the ENABLED state, which means you need to
         * be added to the cache as the last half of this update
         */

        boolean addToCache = false;
        // if this was a recovery alert, or was recently turned into one
        if (newAlertDefinition.getRecoveryId() != 0) {
            // only add to the cache if the to-be-recovered definition is disabled, and thus needs recovering
            AlertDefinition toBeRecoveredDefinition = getAlertDefinitionById(subject,
                    newAlertDefinition.getRecoveryId());
            if (toBeRecoveredDefinition.getEnabled() == false) {
                addToCache = true;
            }
        } else {
            addToCache = true;
        }

        if (addToCache) {
            LOG.debug("Updating AlertConditionCacheManager with AlertDefinition[ id="
                    + newAlertDefinition.getId() + " ]...CREATING");
            for (AlertCondition nextCondition : newAlertDefinition.getConditions()) {
                LOG.debug("NewAlertCondition[ id=" + nextCondition.getId() + " ]");
            }
            notifyAlertConditionCacheManager(subject, "updateAlertDefinition", newAlertDefinition,
                    AlertDefinitionEvent.CREATED);
        }
    }

    /*
     * note, nothing is done to the cache in the STILL_DISABLED case because nothing should've been in the cache to
     * begin with, and nothing needs to be added to the cache as a result
     */

    return newAlertDefinition;
}
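Finally, the flip side of REQUIRES_NEW is worth seeing once in isolation: work committed in the new transaction survives a later rollback of the caller. A minimal sketch with hypothetical names, reusing the AuditBean idea from the introduction above:

import javax.annotation.Resource;
import javax.ejb.EJB;
import javax.ejb.SessionContext;
import javax.ejb.Stateless;
import javax.ejb.TransactionAttribute;
import javax.ejb.TransactionAttributeType;

@Stateless
public class OrderBean {

    @EJB
    private AuditBean audit; // the REQUIRES_NEW bean sketched in the introduction

    @Resource
    private SessionContext ctx;

    @TransactionAttribute(TransactionAttributeType.REQUIRED)
    public void placeOrder(boolean fail) {
        // commits in its own transaction before this method continues
        audit.recordEvent("order attempted");
        if (fail) {
            // rolls back only this (outer) transaction; the audit record
            // written above has already committed and survives
            ctx.setRollbackOnly();
        }
    }
}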