List of usage examples for javax.ejb TransactionAttributeType REQUIRES_NEW
TransactionAttributeType REQUIRES_NEW
REQUIRES_NEW directs the container to invoke the enterprise bean method with a new transaction context: any transaction propagated from the caller is suspended for the duration of the call and resumed afterwards, so the method's work commits or rolls back independently of the caller.
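Before the project examples, here is a minimal, self-contained sketch of the annotation in use. The bean and method names (AuditLogBean, logAction) are hypothetical placeholders for illustration only and are not taken from any of the projects below.

import javax.ejb.Stateless;
import javax.ejb.TransactionAttribute;
import javax.ejb.TransactionAttributeType;

@Stateless
public class AuditLogBean {

    // REQUIRES_NEW: the container suspends any caller transaction and starts a fresh one
    // for this method, so the audit record commits (or rolls back) independently.
    @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
    public void logAction(String action) {
        // Persist the audit entry here, e.g. with an injected EntityManager (hypothetical body).
    }
}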
From source file: ch.puzzle.itc.mobiliar.business.deploy.boundary.DeploymentService.java
@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
public Integer createDeploymentsReturnTrackingId(List<DeploymentEntity> selectedDeployments, Date deploymentDate,
        Date stateToDeploy, List<DeploymentParameter> deployParams, List<Integer> contextIds, boolean sendEmail,
        boolean requestOnly, boolean doSimulate, boolean isExecuteShakedownTest, boolean isNeighbourhoodTest) {
    Integer trackingId = sequencesService.getNextValueAndUpdate(DeploymentEntity.SEQ_NAME);
    Date now = new Date();
    if (deploymentDate == null || deploymentDate.before(now)) {
        deploymentDate = now;
    }
    for (DeploymentEntity selectedDeployment : selectedDeployments) {
        List<ApplicationWithVersion> applicationWithVersion = selectedDeployment.getApplicationsWithVersion();
        Integer appServerGroupId = selectedDeployment.getResourceGroup().getId();
        Integer releaseId = selectedDeployment.getRelease().getId();
        createDeploymentForAppserver(appServerGroupId, releaseId, deploymentDate, stateToDeploy, contextIds,
                applicationWithVersion, deployParams, sendEmail, requestOnly, doSimulate, isExecuteShakedownTest,
                isNeighbourhoodTest, trackingId);
    }
    if (deploymentDate == now && !requestOnly) {
        deploymentEvent.fire(new DeploymentEvent(DeploymentEventType.NEW, DeploymentState.scheduled));
    }
    return trackingId;
}
From source file:org.ejbca.core.ejb.services.ServiceSessionBean.java
@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
@Override
public IWorker getWorkerIfItShouldRun(Integer serviceId, long nextTimeout) {
    IWorker worker = null;
    ServiceData serviceData = serviceDataSession.findById(serviceId);
    ServiceConfiguration serviceConfiguration = serviceData.getServiceConfiguration();
    if (!serviceConfiguration.isActive()) {
        if (log.isDebugEnabled()) {
            log.debug("Service " + serviceId + " is inactive.");
        }
        return null; // Don't return an inactive worker to run
    }
    String serviceName = serviceData.getName();
    final String hostname = getHostName();
    if (shouldRunOnThisNode(hostname, Arrays.asList(serviceConfiguration.getPinToNodes()))) {
        long oldRunTimeStamp = serviceData.getRunTimeStamp();
        long oldNextRunTimeStamp = serviceData.getNextRunTimeStamp();
        worker = getWorker(serviceConfiguration, serviceName, oldRunTimeStamp, oldNextRunTimeStamp);
        if (worker.getNextInterval() == IInterval.DONT_EXECUTE) {
            if (log.isDebugEnabled()) {
                log.debug("Service has interval IInterval.DONT_EXECUTE.");
            }
            return null; // Don't return an inactive worker to run
        }
        // nextRunDateCheck will typically be the same (or just a millisecond earlier) as now here
        Date runDateCheck = new Date(oldNextRunTimeStamp);
        Date currentDate = new Date();
        if (log.isDebugEnabled()) {
            Date nextRunDate = new Date(nextTimeout);
            log.debug("nextRunDate is: " + nextRunDate);
            log.debug("runDateCheck is: " + runDateCheck);
            log.debug("currentDate is: " + currentDate);
        }
        /*
         * Check if the current date is after when the service should run. If a
         * service on another cluster node has updated this timestamp already,
         * then it will return false and this service will not run. This is a
         * semaphore (not the best one admitted) so that services in a cluster
         * only run on one node and don't compete with each other. If a worker
         * on one node for instance runs for a very long time, there is a chance
         * that another worker on another node will break this semaphore and run
         * as well.
         */
        if (currentDate.after(runDateCheck)) {
            /*
             * We only update the nextRunTimeStamp if the service is allowed to run on this node.
             *
             * However, we need to make sure that no other node has already acquired the semaphore
             * if our current database allows non-repeatable reads.
             */
            if (!serviceDataSession.updateTimestamps(serviceId, oldRunTimeStamp, oldNextRunTimeStamp,
                    runDateCheck.getTime(), nextTimeout)) {
                log.debug("Another node had already updated the database at this point. This node will not run.");
                worker = null; // Failed to update the database.
            }
        } else {
            worker = null; // Don't return a worker, since this node should not run
        }
    } else {
        worker = null;
        if (log.isDebugEnabled()) {
            log.debug("Service " + serviceName + " will not run on this node: \"" + hostname + "\", Pinned to: "
                    + Arrays.toString(serviceConfiguration.getPinToNodes()));
        }
    }
    return worker;
}
From source file:io.hops.hopsworks.common.project.ProjectController.java
@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
private Project createProject(String projectName, Users user, String projectDescription,
        DistributedFileSystemOps dfso) throws ProjectException {
    if (user == null) {
        throw new IllegalArgumentException("User was not provided.");
    }
    if (projectFacade.numProjectsLimitReached(user)) {
        throw new ProjectException(RESTCodes.ProjectErrorCode.NUM_PROJECTS_LIMIT_REACHED, Level.FINE,
                "user: " + user.getUsername());
    } else if (projectFacade.projectExists(projectName)) {
        throw new ProjectException(RESTCodes.ProjectErrorCode.PROJECT_EXISTS, Level.FINE,
                "project: " + projectName);
    }
    // Create a new project object
    Date now = new Date();
    Project project = new Project(projectName, user, now, PaymentType.PREPAID);
    project.setKafkaMaxNumTopics(settings.getKafkaMaxNumTopics());
    project.setDescription(projectDescription);
    // Set retention period to the next 10 years by default
    Calendar cal = Calendar.getInstance();
    cal.setTime(now);
    cal.add(Calendar.YEAR, 10);
    project.setRetentionPeriod(cal.getTime());
    // Set a dummy inode in the project until the creation of the project folder
    Path dummy = new Path("/tmp/" + projectName);
    try {
        dfso.touchz(dummy);
        project.setInode(inodes.getInodeAtPath(dummy.toString()));
    } catch (IOException ex) {
        throw new ProjectException(RESTCodes.ProjectErrorCode.PROJECT_INODE_CREATION_ERROR, Level.SEVERE,
                "Couldn't get the dummy Inode at: /tmp/" + projectName, ex.getMessage(), ex);
    }
    // Persist project object
    this.projectFacade.persistProject(project);
    this.projectFacade.flushEm();
    usersController.increaseNumCreatedProjects(user.getUid());
    return project;
}
From source file:ch.puzzle.itc.mobiliar.business.deploy.boundary.DeploymentBoundary.java
@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
public Integer createDeploymentReturnTrackingId(Integer appServerGroupId, Integer releaseId, Date deploymentDate,
        Date stateToDeploy, List<Integer> contextIds, List<ApplicationWithVersion> applicationWithVersion,
        List<DeploymentParameter> deployParams, boolean sendEmail, boolean requestOnly, boolean doSimulate,
        boolean isExecuteShakedownTest, boolean isNeighbourhoodTest) {
    Integer trackingId = sequencesService.getNextValueAndUpdate(DeploymentEntity.SEQ_NAME);
    Date now = new Date();
    if (deploymentDate == null || deploymentDate.before(now)) {
        deploymentDate = now;
    }
    requestOnly = createDeploymentForAppserver(appServerGroupId, releaseId, deploymentDate, stateToDeploy,
            contextIds, applicationWithVersion, deployParams, sendEmail, requestOnly, doSimulate,
            isExecuteShakedownTest, isNeighbourhoodTest, trackingId);
    if (deploymentDate == now && !requestOnly) {
        deploymentEvent.fire(new DeploymentEvent(DeploymentEventType.NEW, DeploymentState.scheduled));
    }
    return trackingId;
}
From source file:io.hops.hopsworks.common.project.ProjectController.java
@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
private void setProjectInode(Project project, DistributedFileSystemOps dfso) throws IOException {
    Inode projectInode = inodes.getProjectRoot(project.getName());
    project.setInode(projectInode);
    this.projectFacade.mergeProject(project);
    this.projectFacade.flushEm();
    Path dummy = new Path("/tmp/" + project.getName());
    dfso.rm(dummy, true);
}
From source file:ch.puzzle.itc.mobiliar.business.deploy.boundary.DeploymentBoundary.java
@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
public Integer createDeploymentsReturnTrackingId(List<DeploymentEntity> selectedDeployments, Date deploymentDate,
        Date stateToDeploy, List<DeploymentParameter> deployParams, List<Integer> contextIds, boolean sendEmail,
        boolean requestOnly, boolean doSimulate, boolean isExecuteShakedownTest, boolean isNeighbourhoodTest) {
    Integer trackingId = sequencesService.getNextValueAndUpdate(DeploymentEntity.SEQ_NAME);
    Date now = new Date();
    if (deploymentDate == null || deploymentDate.before(now)) {
        deploymentDate = now;
    }
    for (DeploymentEntity selectedDeployment : selectedDeployments) {
        List<ApplicationWithVersion> applicationWithVersion = selectedDeployment.getApplicationsWithVersion();
        Integer appServerGroupId = selectedDeployment.getResourceGroup().getId();
        Integer releaseId = selectedDeployment.getRelease().getId();
        requestOnly = createDeploymentForAppserver(appServerGroupId, releaseId, deploymentDate, stateToDeploy,
                contextIds, applicationWithVersion, deployParams, sendEmail, requestOnly, doSimulate,
                isExecuteShakedownTest, isNeighbourhoodTest, trackingId);
    }
    if (deploymentDate == now && !requestOnly) {
        deploymentEvent.fire(new DeploymentEvent(DeploymentEventType.NEW, DeploymentState.scheduled));
    }
    return trackingId;
}
From source file:io.hops.hopsworks.common.project.ProjectController.java
@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
private boolean noExistingUser(String projectName) {
    List<HdfsUsers> hdfsUsers = hdfsUsersController.getAllProjectHdfsUsers(projectName);
    if (hdfsUsers != null && !hdfsUsers.isEmpty()) {
        LOGGER.log(Level.WARNING, "hdfs users exist for project {0}", projectName);
        return false;
    }
    return true;
}
From source file:org.ejbca.core.ejb.upgrade.UpgradeSessionBean.java
@Deprecated
@Override
@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
public RoleData replaceAccessRulesInRoleNoAuth(final AuthenticationToken authenticationToken, final RoleData role,
        final Collection<AccessRuleData> accessRules) throws RoleNotFoundException {
    RoleData result = roleAccessSession.findRole(role.getPrimaryKey());
    if (result == null) {
        final String msg = INTERNAL_RESOURCES.getLocalizedMessage("authorization.errorrolenotexists",
                role.getRoleName());
        throw new RoleNotFoundException(msg);
    }
    Map<Integer, AccessRuleData> rulesFromResult = result.getAccessRules();
    Map<Integer, AccessRuleData> rulesToResult = new HashMap<Integer, AccessRuleData>();
    // Lists for logging purposes.
    Collection<AccessRuleData> newRules = new ArrayList<AccessRuleData>();
    Collection<AccessRuleData> changedRules = new ArrayList<AccessRuleData>();
    for (AccessRuleData rule : accessRules) {
        if (AccessRuleData.generatePrimaryKey(role.getRoleName(), rule.getAccessRuleName()) != rule.getPrimaryKey()) {
            throw new Error("Role " + role.getRoleName() + " did not match up with the role that created this rule.");
        }
        Integer ruleKey = rule.getPrimaryKey();
        if (rulesFromResult.containsKey(ruleKey)) {
            AccessRuleData oldRule = rulesFromResult.get(ruleKey);
            if (!oldRule.equals(rule)) {
                changedRules.add(oldRule);
            }
            AccessRuleData newRule = accessRuleManagementSession.setState(rule, rule.getInternalState(),
                    rule.getRecursive());
            rulesFromResult.remove(ruleKey);
            rulesToResult.put(newRule.getPrimaryKey(), newRule);
        } else {
            try {
                newRules.add(accessRuleManagementSession.createRule(rule.getAccessRuleName(), result.getRoleName(),
                        rule.getInternalState(), rule.getRecursive()));
            } catch (AccessRuleExistsException e) {
                throw new Error("Access rule exists, but wasn't found in persistence in previous call.", e);
            }
            rulesToResult.put(rule.getPrimaryKey(), rule);
        }
    }
    logAccessRulesAdded(authenticationToken, role.getRoleName(), newRules);
    logAccessRulesChanged(authenticationToken, role.getRoleName(), changedRules);
    // And for whatever remains:
    accessRuleManagementSession.remove(rulesFromResult.values());
    result.setAccessRules(rulesToResult);
    result = entityManager.merge(result);
    logAccessRulesRemoved(authenticationToken, role.getRoleName(), rulesFromResult.values());
    accessTreeUpdateSession.signalForAccessTreeUpdate();
    accessControlSession.forceCacheExpire();
    return result;
}
From source file:io.hops.hopsworks.common.project.ProjectController.java
@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
private boolean noExistingGroup(String projectName) {
    List<HdfsGroups> hdfsGroups = hdfsUsersController.getAllProjectHdfsGroups(projectName);
    if (hdfsGroups != null && !hdfsGroups.isEmpty()) {
        LOGGER.log(Level.WARNING, () -> "hdfs group(s) exist for project: " + projectName + ", group(s): "
                + Arrays.toString(hdfsGroups.toArray()));
        return false;
    }
    return true;
}
From source file:io.hops.hopsworks.common.project.ProjectController.java
@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
private boolean verifyQuota(String projectName) {
    YarnProjectsQuota projectsQuota = yarnProjectsQuotaFacade.findByProjectName(projectName);
    if (projectsQuota != null) {
        LOGGER.log(Level.WARNING, "quota existing for project {0}", projectName);
        return false;
    }
    return true;
}