Example usage for javax.ejb TransactionAttributeType REQUIRES_NEW

List of usage examples for javax.ejb TransactionAttributeType REQUIRES_NEW

Introduction

On this page you can find example usages of javax.ejb TransactionAttributeType.REQUIRES_NEW.

Prototype

TransactionAttributeType REQUIRES_NEW

Document

The container must invoke an enterprise bean method whose transaction attribute is set to REQUIRES_NEW with a new transaction context.
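
As a quick orientation before the usage listings, here is a minimal, self-contained sketch of where the attribute is placed. It is not taken from any of the projects quoted on this page; the bean, method, and AuditEvent entity are invented for illustration. The container suspends whatever transaction the caller propagates and runs the annotated method in a fresh transaction that commits or rolls back on its own.

import javax.ejb.Stateless;
import javax.ejb.TransactionAttribute;
import javax.ejb.TransactionAttributeType;
import javax.persistence.Entity;
import javax.persistence.EntityManager;
import javax.persistence.GeneratedValue;
import javax.persistence.Id;
import javax.persistence.PersistenceContext;

@Entity
class AuditEvent { // hypothetical entity, defined only so the sketch is self-contained
    @Id
    @GeneratedValue
    Long id;
    String message;

    public AuditEvent() {
    }

    public AuditEvent(String message) {
        this.message = message;
    }
}

@Stateless
public class AuditTrailBean {

    @PersistenceContext
    private EntityManager entityManager;

    // The caller's transaction (if any) is suspended and a new one is started for
    // this invocation, so the audit row stays committed even if the caller's own
    // transaction rolls back afterwards.
    @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
    public void recordEvent(String message) {
        entityManager.persist(new AuditEvent(message));
    }
}

A caller running under the default REQUIRED attribute can therefore roll back its own work without undoing the audit insert.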

Usage

From source file:org.rhq.enterprise.server.configuration.ConfigurationManagerBean.java

@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
public ResourceConfigurationUpdate persistNewResourceConfigurationUpdateHistory(Subject subject, int resourceId,
        Configuration newConfiguration, ConfigurationUpdateStatus newStatus, String newSubject,
        boolean isPartofGroupUpdate)
        throws ResourceNotFoundException, ConfigurationUpdateStillInProgressException {
    // get the resource that we will be updating
    Resource resource = resourceManager.getResourceById(subject, resourceId);

    // make sure the user has the proper permissions to do this
    if (!authorizationManager.hasResourcePermission(subject, Permission.CONFIGURE_WRITE, resource.getId())) {
        throw new PermissionException("User [" + subject.getName()
                + "] does not have permission to modify configuration for resource [" + resource + "]");
    }

    // see if there was a previous update request and make sure it isn't still in progress
    List<ResourceConfigurationUpdate> previousRequests = resource.getResourceConfigurationUpdates();

    String errorMessage = null;
    if (previousRequests != null) {
        for (ResourceConfigurationUpdate previousRequest : previousRequests) {
            if (previousRequest.getStatus() == ConfigurationUpdateStatus.INPROGRESS) {
                // A previous update is still in progress for this Resource. If this update is part of a group
                // update, persist it with FAILURE status, so it is still listed as part of the group history.
                // Otherwise, throw an exception that can bubble up to the GUI.
                if (isPartofGroupUpdate) {
                    newStatus = ConfigurationUpdateStatus.FAILURE;
                    errorMessage = "Resource configuration Update was aborted because an update request for the Resource was already in progress.";
                } else {
                    // NOTE: If you change this to another exception, make sure you change getLatestResourceConfigurationUpdate().
                    throw new ConfigurationUpdateStillInProgressException("Resource [" + resource
                            + "] has a resource configuration update request already in progress - please wait for it to finish: "
                            + previousRequest);
                }
            }
        }
    }

    ResourceConfigurationUpdate current;

    // Get the latest configuration as known to the server (i.e. persisted in the DB).
    try {
        Query query = entityManager
                .createNamedQuery(ResourceConfigurationUpdate.QUERY_FIND_CURRENTLY_ACTIVE_CONFIG);
        query.setParameter("resourceId", resourceId);
        current = (ResourceConfigurationUpdate) query.getSingleResult();
        resource = current.getResource();
    } catch (NoResultException nre) {
        current = null; // The resource hasn't been successfully configured yet.
    }

    // If this update is not part of a group update, don't bother persisting a new entry if the Configuration
    // hasn't changed. If it's part of a group update, persist a new entry no matter what, so the group
    // update isn't missing any member updates.
    if (!isPartofGroupUpdate && current != null) {
        Configuration currentConfiguration = current.getConfiguration();
        Hibernate.initialize(currentConfiguration.getMap());
        if (currentConfiguration.equals(newConfiguration)) {
            return null;
        }
    }

    //Configuration zeroedConfiguration = newConfiguration.deepCopy(false);
    Configuration zeroedConfiguration = newConfiguration.deepCopyWithoutProxies();

    // create our new update request and assign it to our resource - its status will initially be "in progress"
    ResourceConfigurationUpdate newUpdateRequest = new ResourceConfigurationUpdate(resource,
            zeroedConfiguration, newSubject);

    newUpdateRequest.setStatus(newStatus);
    if (newStatus == ConfigurationUpdateStatus.FAILURE) {
        newUpdateRequest.setErrorMessage(errorMessage);
    }

    entityManager.persist(newUpdateRequest);
    if (current != null) {
        if (newStatus == ConfigurationUpdateStatus.SUCCESS) {
            // If this is the first configuration update since the resource was imported, don't alert
            notifyAlertConditionCacheManager("persistNewResourceConfigurationUpdateHistory", newUpdateRequest);
        }
    }

    resource.addResourceConfigurationUpdates(newUpdateRequest);

    // agent and childResources fields are LAZY - force them to load, because the caller will need them.
    Hibernate.initialize(resource.getChildResources());
    Hibernate.initialize(resource.getAgent());

    return newUpdateRequest;
}
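
To make the effect of REQUIRES_NEW in this method concrete, a hypothetical caller could look like the sketch below. It is not part of the RHQ sources; ConfigurationManagerLocal and the surrounding bean are assumptions made for illustration, while the parameter types are the same ones used in the example above. Because the history row is persisted in its own transaction, it survives even if the caller's transaction later rolls back.

import javax.ejb.EJB;
import javax.ejb.Stateless;
import javax.ejb.TransactionAttribute;
import javax.ejb.TransactionAttributeType;
// Subject, Configuration, ConfigurationUpdateStatus, ResourceConfigurationUpdate and the
// exceptions are the same RHQ types used in the example above.

@Stateless
public class ConfigurationUpdateDriverBean {

    @EJB
    private ConfigurationManagerLocal configurationManager; // assumed local business interface

    @TransactionAttribute(TransactionAttributeType.REQUIRED)
    public void updateAndApply(Subject subject, int resourceId, Configuration newConfiguration)
            throws ResourceNotFoundException, ConfigurationUpdateStillInProgressException {
        // Committed right away in a separate transaction by the REQUIRES_NEW method above.
        ResourceConfigurationUpdate history = configurationManager.persistNewResourceConfigurationUpdateHistory(
                subject, resourceId, newConfiguration, ConfigurationUpdateStatus.INPROGRESS,
                subject.getName(), false);

        // If anything after this point throws, this REQUIRED transaction rolls back,
        // but the history record persisted above remains in the database.
        // ... send the new configuration to the agent, etc.
    }
}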

From source file:io.hops.hopsworks.common.project.ProjectController.java

@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
private List<ProjectTeam> updateProjectTeamRole(Project project, ProjectRoleTypes teamRole) {
    return projectTeamFacade.updateTeamRole(project, teamRole);
}
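
One caveat that applies to private helper methods like the one above: the container applies transaction attributes only to invocations that pass through a business interface or no-interface-view proxy. A plain Java call from another method of the same bean instance bypasses the container's interceptors, so REQUIRES_NEW would not start a new transaction on that path. A minimal sketch of the usual workaround, routing the call through a self-reference obtained from the SessionContext (the bean and method names are invented):

import javax.annotation.Resource;
import javax.ejb.SessionContext;
import javax.ejb.Stateless;
import javax.ejb.TransactionAttribute;
import javax.ejb.TransactionAttributeType;

@Stateless
public class ProjectCleanupBean {

    @Resource
    private SessionContext sessionContext;

    public void cleanup() {
        // Obtain the container-managed proxy for this bean and call through it,
        // so transaction interceptors (including REQUIRES_NEW) are applied.
        ProjectCleanupBean self = sessionContext.getBusinessObject(ProjectCleanupBean.class);
        self.removeQuotasInNewTransaction();
    }

    // Runs in its own transaction only when invoked through the proxy above,
    // not when called directly as this.removeQuotasInNewTransaction().
    @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
    public void removeQuotasInNewTransaction() {
        // ... delete quota rows, call facades, etc.
    }
}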

From source file:org.rhq.enterprise.server.content.ContentSourceManagerBean.java

@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
public RepoSyncResults _mergePackageSyncReportADD(ContentSource contentSource, Repo repo,
        Collection<ContentProviderPackageDetails> newPackages,
        Map<ContentProviderPackageDetailsKey, PackageVersionContentSource> previous,
        RepoSyncResults syncResults, StringBuilder progress, int addCount) {
    Query q;
    int flushCount = 0; // used to know when we should flush the entity manager - for performance purposes

    Map<ResourceType, ResourceType> knownResourceTypes = new HashMap<ResourceType, ResourceType>();
    Map<PackageType, PackageType> knownPackageTypes = new HashMap<PackageType, PackageType>();
    Map<Architecture, Architecture> knownArchitectures = new HashMap<Architecture, Architecture>();

    Map<ResourceType, Map<String, ProductVersion>> knownProductVersions = new HashMap<ResourceType, Map<String, ProductVersion>>();

    // Load this so we have the attached version in the repo <-> package mapping
    repo = entityManager.find(Repo.class, repo.getId());

    // add new packages that are new to the content source.
    // for each new package, we have to find its resource type and package type
    // (both of which must exist, or we abort that package and move on to the next);
    // then find the package and architecture, creating them if they do not exist;
    // then create the new PV as well as the new PVCS mapping.
    // if a repo is associated with the content source, the PV is directly associated with the repo.

    for (ContentProviderPackageDetails newDetails : newPackages) {
        ContentProviderPackageDetailsKey key = newDetails.getContentProviderPackageDetailsKey();

        // find the new package's associated resource type (should already exist)
        ResourceType rt = null;
        if (key.getResourceTypeName() != null && key.getResourceTypePluginName() != null) {
            rt = new ResourceType();
            rt.setName(key.getResourceTypeName());
            rt.setPlugin(key.getResourceTypePluginName());

            if (!knownResourceTypes.containsKey(rt)) {
                q = entityManager.createNamedQuery(ResourceType.QUERY_FIND_BY_NAME_AND_PLUGIN);
                q.setParameter("name", rt.getName());
                q.setParameter("plugin", rt.getPlugin());

                try {
                    rt = (ResourceType) q.getSingleResult();
                    knownResourceTypes.put(rt, rt); // cache it so we don't have to keep querying the DB
                    knownProductVersions.put(rt, new HashMap<String, ProductVersion>());
                } catch (NoResultException nre) {
                    log.warn("Content source adapter found a package for an unknown resource type ["
                            + key.getResourceTypeName() + "|" + key.getResourceTypePluginName()
                            + "] Skipping it.");
                    continue; // skip this one but move on to the next
                }
            } else {
                rt = knownResourceTypes.get(rt);
            }
        }

        // find the new package's type (package types should already exist, agent plugin descriptors define them)
        PackageType pt = new PackageType(key.getPackageTypeName(), rt);

        if (!knownPackageTypes.containsKey(pt)) {
            if (rt != null) {
                q = entityManager.createNamedQuery(PackageType.QUERY_FIND_BY_RESOURCE_TYPE_ID_AND_NAME);
                q.setParameter("typeId", rt.getId());
            } else {
                q = entityManager.createNamedQuery(PackageType.QUERY_FIND_BY_NAME_AND_NULL_RESOURCE_TYPE);
            }

            q.setParameter("name", pt.getName());

            try {
                pt = (PackageType) q.getSingleResult();
                pt.setResourceType(rt); // we don't fetch join this, but we already know it, so just set it
                knownPackageTypes.put(pt, pt); // cache it so we don't have to keep querying the DB
            } catch (NoResultException nre) {
                log.warn("Content source adapter found a package of an unknown package type ["
                        + key.getPackageTypeName() + "|" + rt + "] Skipping it.");
                continue; // skip this one but move on to the next
            }
        } else {
            pt = knownPackageTypes.get(pt);
        }

        // create the new package, if one does not already exist
        // we don't bother caching these - we won't have large amounts of the same packages
        q = entityManager.createNamedQuery(Package.QUERY_FIND_BY_NAME_PKG_TYPE_RESOURCE_TYPE);
        q.setParameter("name", newDetails.getName());
        q.setParameter("packageTypeName", newDetails.getPackageTypeName());
        q.setParameter("resourceTypeId", rt != null ? rt.getId() : null);
        Package pkg;
        try {
            pkg = (Package) q.getSingleResult();
        } catch (NoResultException nre) {
            pkg = new Package(newDetails.getName(), pt);
            pkg.setClassification(newDetails.getClassification());
            // we would have liked to rely on merge cascading when we merge the PV
            // but we need to watch out for the fact that we could be running at the
            // same time an agent sent us a content report that wants to create the same package.
            // if this is too hard a hit on performance, we can comment out the below line
            // and just accept the fact we might fail if the package is created underneath us,
            // which would cause our tx to rollback. the next sync should help us survive this failure.
            pkg = this.contentManager.persistOrMergePackageSafely(pkg);
        }

        // find and, if necessary, create the architecture
        Architecture arch = new Architecture(newDetails.getArchitectureName());

        if (!knownArchitectures.containsKey(arch)) {
            q = entityManager.createNamedQuery(Architecture.QUERY_FIND_BY_NAME);
            q.setParameter("name", arch.getName());

            try {
                arch = (Architecture) q.getSingleResult();
                knownArchitectures.put(arch, arch); // cache it so we don't have to keep querying the DB
            } catch (NoResultException nre) {
                log.info("Content source adapter found a previously unknown architecture [" + arch
                        + "] - it will be added to the list of known architectures");
            }
        } else {
            arch = knownArchitectures.get(arch);
        }

        // now finally create the new package version - this cascade-persists down several levels
        // note that other content sources might have already defined this previously, so only
        // persist it if it does not yet exist
        PackageVersion pv = new PackageVersion(pkg, newDetails.getVersion(), arch);
        pv.setDisplayName(newDetails.getDisplayName());
        pv.setDisplayVersion(newDetails.getDisplayVersion());
        pv.setExtraProperties(newDetails.getExtraProperties());
        pv.setFileCreatedDate(newDetails.getFileCreatedDate());
        pv.setFileName(newDetails.getFileName());
        pv.setFileSize(newDetails.getFileSize());
        pv.setLicenseName(newDetails.getLicenseName());
        pv.setLicenseVersion(newDetails.getLicenseVersion());
        pv.setLongDescription(newDetails.getLongDescription());
        pv.setMD5(newDetails.getMD5());
        pv.setMetadata(newDetails.getMetadata());
        pv.setSHA256(newDetails.getSHA256());
        pv.setShortDescription(newDetails.getShortDescription());

        q = entityManager.createNamedQuery(PackageVersion.QUERY_FIND_BY_PACKAGE_DETAILS_KEY);
        q.setParameter("packageName", newDetails.getName());
        q.setParameter("packageTypeName", pt.getName());
        q.setParameter("resourceType", rt);
        q.setParameter("architectureName", arch.getName());
        q.setParameter("version", newDetails.getVersion());

        try {
            PackageVersion pvExisting = (PackageVersion) q.getSingleResult();

            // the PackageVersion already exists, which is OK, another content source already defined it
            // but, let's make sure the important pieces of data are the same, otherwise, two content
            // sources SAY they have the same PackageVersion, but they really don't. We warn in the log
            // but the new data will overwrite the old.
            packageVersionAttributeCheck(pvExisting, pvExisting.getExtraProperties(), pv,
                    pv.getExtraProperties(), "ExtraProps");
            packageVersionAttributeCheck(pvExisting, pvExisting.getFileSize(), pv, pv.getFileSize(),
                    "FileSize");
            packageVersionAttributeCheck(pvExisting, pvExisting.getFileName(), pv, pv.getFileName(),
                    "FileName");
            packageVersionAttributeCheck(pvExisting, pvExisting.getMD5(), pv, pv.getMD5(), "MD5");
            packageVersionAttributeCheck(pvExisting, pvExisting.getSHA256(), pv, pv.getSHA256(), "SHA256");
            // what about metadata? test that for length only? string comparison check?

            pv = pvExisting;
        } catch (NoResultException nre) {
            // this is fine, it's the first time we've seen this PV, let merge just create a new one
        }

        // we normally would want to do this:
        //    pv = entityManager.merge(pv);
        // but we have to watch out for an agent sending us a content report at the same time that
        // would create this PV concurrently.  If the below line takes too hard a hit on performance,
        // we can replace it with the merge call mentioned above and hope this concurrency doesn't happen.
        // if it does happen, we will rollback our tx and we'll have to wait for the next sync to fix it.
        pv = contentManager.persistOrMergePackageVersionSafely(pv);

        // For each resource version that is supported, make sure we have an entry for that in product version
        Set<String> resourceVersions = newDetails.getResourceVersions();

        // the check for null resource type shouldn't be necessary here because
        // the package shouldn't declare any resource versions if it doesn't declare a resource type.
        // Nevertheless, let's make that check just to prevent disasters caused by "malicious" content
        // providers.                        
        if (resourceVersions != null && rt != null) {
            Map<String, ProductVersion> cachedProductVersions = knownProductVersions.get(rt); // we are guaranteed that this returns non-null
            for (String version : resourceVersions) {
                ProductVersion productVersion = cachedProductVersions.get(version);
                if (productVersion == null) {
                    productVersion = productVersionManager.addProductVersion(rt, version);
                    cachedProductVersions.put(version, productVersion);
                }

                ProductVersionPackageVersion mapping = new ProductVersionPackageVersion(productVersion, pv);
                entityManager.merge(mapping); // use merge just in case this mapping somehow already exists
            }
        } else if (resourceVersions != null) {
            log.info("Misbehaving content provider detected. It declares resource versions " + resourceVersions
                    + " but no resource type in package " + newDetails + ".");
        }

        // now create the mapping between the package version and content source
        // note that if the mapping already exists, we overwrite it (a rare occurrence, but could happen)
        PackageVersionContentSource newPvcs = new PackageVersionContentSource(pv, contentSource,
                newDetails.getLocation());
        newPvcs = entityManager.merge(newPvcs);

        // for all repos that are associated with this content source, add this package version directly to them
        RepoPackageVersion mapping = new RepoPackageVersion(repo, pv);
        entityManager.merge(mapping); // use merge just in case this mapping somehow already exists

        // Cleanup
        if ((++flushCount % 100) == 0) {
            knownResourceTypes.clear();
            knownPackageTypes.clear();
            knownArchitectures.clear();
            knownProductVersions.clear();
            entityManager.flush();
            entityManager.clear();
        }

        if ((++addCount % 100) == 0) {
            progress.append("...").append(addCount);
            syncResults.setResults(progress.toString());
            syncResults = repoManager.mergeRepoSyncResults(syncResults);
        }
    }

    return syncResults;
}

From source file:io.hops.hopsworks.common.project.ProjectController.java

@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
private List<HdfsUsers> getUsersToClean(Project project) {
    return hdfsUsersController.getAllProjectHdfsUsers(project.getName());
}

From source file:io.hops.hopsworks.common.project.ProjectController.java

@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
private List<HdfsGroups> getGroupsToClean(Project project) {

    return hdfsUsersController.getAllProjectHdfsGroups(project.getName());

}

From source file:io.hops.hopsworks.common.project.ProjectController.java

@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
private void removeKafkaTopics(Project project) throws ServiceException, InterruptedException {
    kafkaFacade.removeAllTopicsFromProject(project);
}

From source file:io.hops.hopsworks.common.project.ProjectController.java

@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
private void removeQuotas(Project project) {
    YarnProjectsQuota yarnProjectsQuota = yarnProjectsQuotaFacade.findByProjectName(project.getName());
    yarnProjectsQuotaFacade.remove(yarnProjectsQuota);
}

From source file:io.hops.hopsworks.common.project.ProjectController.java

@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
private void fixSharedDatasets(Project project, DistributedFileSystemOps dfso) throws IOException {
    List<Dataset> sharedDataSets = datasetFacade.findSharedWithProject(project);
    for (Dataset dataSet : sharedDataSets) {
        String owner = dataSet.getInode().getHdfsUser().getName();
        String group = dataSet.getInode().getHdfsGroup().getName();
        List<Inode> children = new ArrayList<>();
        inodeFacade.getAllChildren(dataSet.getInode(), children);
        for (Inode child : children) {
            if (child.getHdfsUser().getName().startsWith(project.getName() + "__")) {
                Path childPath = new Path(inodeFacade.getPath(child));
                dfso.setOwner(childPath, owner, group);
            }
        }
    }
}

From source file:io.hops.hopsworks.common.project.ProjectController.java

@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
private void removeGroupAndUsers(List<HdfsGroups> groups, List<HdfsUsers> users) throws IOException {
    hdfsUsersController.deleteGroups(groups);
    hdfsUsersController.deleteUsers(users);
}

From source file:org.nightlabs.jfire.accounting.AccountingManagerBean.java

@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
@RolesAllowed("_Guest_")
@Override
public PaymentResult _payEnd(final PaymentID paymentID, final PaymentResult payEndClientResult,
        final boolean forceRollback) {
    if (paymentID == null)
        throw new NullPointerException("paymentID");

    if (payEndClientResult == null)
        throw new NullPointerException("payEndClientResult");

    // Store payEndClientResult into the database within a NEW TRANSACTION to
    // prevent it from being lost (if this method fails later and causes a rollback).
    try {
        paymentHelperLocal.payEnd_storePayEndClientResult(paymentID, payEndClientResult, forceRollback);
    } catch (final RuntimeException x) {
        throw x;
    } catch (final Exception x) {
        throw new RuntimeException(x);
    }

    final String[] fetchGroups = new String[] { FetchPlan.DEFAULT };

    try {

        return paymentHelperLocal.payEnd_internal(paymentID, fetchGroups, NLJDOHelper.MAX_FETCH_DEPTH_NO_LIMIT);

    } catch (final Throwable t) {
        logger.error("payEnd_internal(...) failed: " + paymentID, t);
        final PaymentResult payEndServerResult = new PaymentResult(getOrganisationID(), t);

        try {
            final PaymentResult payEndServerResult_detached = paymentHelperLocal.payEnd_storePayEndServerResult(
                    paymentID, payEndServerResult, true, fetchGroups, NLJDOHelper.MAX_FETCH_DEPTH_NO_LIMIT);

            paymentHelperLocal.payRollback(paymentID);

            return payEndServerResult_detached;
        } catch (final RuntimeException x) {
            throw x;
        } catch (final Exception x) {
            throw new RuntimeException(x);
        }
    }
}