List of usage examples for javax.ejb TransactionAttributeType REQUIRES_NEW
TransactionAttributeType REQUIRES_NEW
To view the source code for javax.ejb TransactionAttributeType.REQUIRES_NEW, click the source link.
REQUIRES_NEW — the container invokes the enterprise bean method with a new transaction context. From source file: pl.psnc.synat.wrdz.mdz.integrity.IntegrityProcessorBean.java
/**
 * Completes one integrity-verification cycle: logs a summary of the cycle's
 * results (totals, corrupted count, time window) and clears the verification
 * table so the next cycle starts fresh. Runs in its own transaction so the
 * cleanup commits independently of the caller.
 */
@Override
@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
public void finishCycle() {
    long verifiedCount = objectDao.countAll();
    long corruptedCount = objectDao.countCorrupted();
    Date cycleStart = objectDao.getFirstAdded();
    Date cycleEnd = objectDao.getLastVerified();

    String summary = String.format(
            "Finished integrity verification [%s - %s]: verified %d objects, out of which %d were corrupted",
            cycleStart, cycleEnd, verifiedCount, corruptedCount);
    logger.info(summary);

    objectDao.deleteAll();
}
From source file:org.rhq.enterprise.server.measurement.MeasurementDataManagerBean.java
@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) @TransactionTimeout(6 * 60 * 60)//from w ww . j av a 2 s. co m public int purgeTraits(long oldest) { Connection conn = null; PreparedStatement stmt = null; try { conn = rhqDs.getConnection(); stmt = conn.prepareStatement(MeasurementDataTrait.NATIVE_QUERY_PURGE); stmt.setLong(1, oldest); long startTime = System.currentTimeMillis(); int deleted = stmt.executeUpdate(); MeasurementMonitor.getMBean().incrementPurgeTime(System.currentTimeMillis() - startTime); MeasurementMonitor.getMBean().setPurgedMeasurementTraits(deleted); return deleted; } catch (Exception e) { throw new RuntimeException("Failed to purge traits older than [" + oldest + "]", e); } finally { JDBCUtil.safeClose(conn, stmt, null); } }
From source file:org.rhq.enterprise.server.resource.ResourceFactoryManagerBean.java
/**
 * Completes a previously-issued delete-resource request: records the agent's
 * response status on the persisted history entry and, on success, marks the
 * resource as DELETED and uninventories its children. Runs in a new
 * transaction so the status update commits regardless of the caller.
 *
 * @param response the agent's outcome for the delete request; its request id
 *            must refer to an existing DeleteResourceHistory row
 */
@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
public void completeDeleteResourceRequest(DeleteResourceResponse response) {
    log.debug("Received call to complete delete resource: " + response);

    // Load the persisted history entry
    DeleteResourceHistory history = entityManager.find(DeleteResourceHistory.class, response.getRequestId());

    // There is some inconsistency if we're completing a request that was not in the database
    if (history == null) {
        log.error("Attemping to complete a request that was not found in the database: "
            + response.getRequestId());
        return;
    }

    // Update the history entry
    history.setErrorMessage(response.getErrorMessage());
    history.setStatus(response.getStatus());

    // If successful mark resource as deleted and uninventory children
    if (response.getStatus() == DeleteResourceStatus.SUCCESS) {
        Resource resource = history.getResource();

        // get doomed children
        Set<Resource> children = resource.getChildResources();

        // set the resource deleted and update the db in case it matters to the child operations
        resource.setInventoryStatus(InventoryStatus.DELETED);
        //resource.setParentResource(null); can't null this out since the query DeleteResourceHistory.QUERY_FIND_BY_PARENT_RESOURCE_ID needs it
        resource.setItime(System.currentTimeMillis());
        entityManager.merge(resource);

        // uninventory the children of the deleted resource (see rhq-2378)
        uninventoryChildren(children);
    }
}
From source file:org.niord.core.mail.ScheduledMailService.java
/** * Called every day to delete old scheduled mails. * * Note to self: It would have been faster to execute a "delete from ScheduledMail where..." statement. * However, this will fail because of a missing mail - recipient "delete on cascade" FK constraint. * Using JPAs CascadeType.ALL for the relation does NOT work in this case. *//*w ww . j a v a 2s . c o m*/ @Schedule(persistent = false, second = "48", minute = "28", hour = "05") @Lock(LockType.WRITE) @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) private void deleteExpiredMails() { // If expiryDate is 0 (actually, non-positive), never delete mails if (mailDeleteAfterDays <= 0) { return; } Date expiryDate = TimeUtils.add(new Date(), Calendar.DATE, -mailDeleteAfterDays); List<Integer> ids = em.createNamedQuery("ScheduledMail.findExpiredMails", Integer.class) .setParameter("expiryDate", expiryDate).getResultList(); if (!ids.isEmpty()) { long t0 = System.currentTimeMillis(); try { for (int x = 0; x < ids.size(); x++) { ScheduledMail mail = getScheduledMail(ids.get(x)); if (mail != null) { em.remove(mail); if ((x % 50) == 0) { em.flush(); } } } log.info("Deleted " + ids.size() + " scheduled mails older than " + expiryDate + " in " + (System.currentTimeMillis() - t0) + " ms"); } catch (Exception e) { log.error("Failed deleting scheduled mails older than " + expiryDate); } } }
From source file:org.rhq.enterprise.server.cloud.StatusManagerBean.java
/**
 * Flips the status bits for the current server via a named bulk-update query.
 * Runs in its own transaction so the status change commits immediately,
 * independent of the caller's transaction.
 */
@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
public void markGlobalCache() {
    Query statusUpdate = entityManager.createNamedQuery(Server.QUERY_UPDATE_STATUS_BY_NAME);
    statusUpdate.setParameter("identity", serverManager.getIdentity());
    int updatedCount = statusUpdate.executeUpdate();

    /*
     * this is informational debugging only - do NOT change the status bits here
     */
    if (log.isDebugEnabled()) {
        Server server = serverManager.getServer();
        log.debug("Marking status, server[id=" + server.getId() + ", status=" + server.getStatus() + "]");
        log.debug("Servers updated: " + updatedCount);
    }
}
From source file:org.rhq.enterprise.server.measurement.MeasurementOOBManagerBean.java
/** * Computes the OOBs for the last hour.//from w w w . java2 s . c o m * This is done by getting the latest timestamp of the 1h table and invoking * #computeOOBsFromHourBeginingAt * @param subject Caller */ @TransactionAttribute(value = TransactionAttributeType.REQUIRES_NEW) public void computeOOBsFromLastHour(Subject subject) { Query q = entityManager.createNamedQuery(MeasurementDataNumeric1H.GET_MAX_TIMESTAMP); Object res = q.getSingleResult(); if (res == null) { if (log.isDebugEnabled()) log.debug("No data yet in 1h table, nothing to do"); return; // no data in that table yet - nothing to do. } long timeStamp = (Long) res; // check if we did this already (because the server did not get data for > 1h q = entityManager.createNamedQuery(MeasurementOOB.COUNT_FOR_DATE); q.setParameter("timestamp", timeStamp); Long count = (Long) q.getSingleResult(); if (count == 0) computeOOBsFromHourBeginingAt(subject, timeStamp); else log.info("Calculation of OOBs already done for hour " + new Date(timeStamp)); }
From source file:pl.psnc.synat.wrdz.zmkd.plan.MigrationPlanProcessorBean.java
/**
 * Processes a single object of the given migration plan: fetches the object
 * from ZMD over HTTPS, applies the transformation path, and uploads the
 * migrated result back. Runs in a new transaction so each object's processing
 * commits (or fails) independently of the rest of the plan.
 *
 * @param planId identifier of the migration plan being executed
 * @param path chain of transformations to apply to the fetched object
 * @return PROCESSED when the object was migrated and uploaded, PAUSED when
 *         ZMD answered 202 (object still being prepared), FINISHED when the
 *         plan has no more objects to process
 * @throws MigrationProcessingException if the object is missing, access is
 *         denied, or fetching/transforming/uploading fails
 * @throws MigrationPlanNotFoundException if no plan with the given id exists
 */
@Override
@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
public MigrationProcessingResult processOne(long planId, List<TransformationInfo> path)
        throws MigrationProcessingException, MigrationPlanNotFoundException {
    MigrationPlan plan = migrationPlanManager.getMigrationPlanById(planId);

    UserDto owner = userBrowser.getUser(plan.getOwnerId());
    if (owner == null) {
        throw new WrdzRuntimeException("Missing owner");
    }

    String objectIdentifier = getCurrentObjectIdentifier(planId);
    if (objectIdentifier == null) {
        // no current object left in the plan - the whole plan is done
        return MigrationProcessingResult.FINISHED;
    }

    Long objectId = identifierBrowser.getObjectId(objectIdentifier);
    if (objectId == null) {
        migrationItemManager.logError(planId, objectIdentifier, null);
        throw new MigrationProcessingException(planId, "Object does not exist: " + objectIdentifier);
    }

    if (!objectPermissionManager.hasPermission(owner.getUsername(), objectId,
            ObjectPermissionType.METADATA_UPDATE)) {
        migrationItemManager.logPermissionError(planId, objectIdentifier, null);
        throw new MigrationProcessingException(planId, "Insufficient access rights: " + objectIdentifier);
    }

    migrationItemManager.logMigrationStarted(planId, objectIdentifier);

    HttpClient client = httpsClientHelper.getHttpsClient(WrdzModule.ZMKD);
    HttpGet get = new HttpGet(zmkdConfiguration.getZmdObjectUrl(objectIdentifier));

    HttpResponse response = null;
    File digitalObjectFile;
    File workDir;
    try {
        // NOTE(review): the synchronized block covers the execute + 202 check;
        // presumably serializes the "waiting for object" bookkeeping - confirm
        synchronized (this) {
            response = client.execute(get);
            if (response.getStatusLine().getStatusCode() == HttpStatus.SC_ACCEPTED) {
                // 202: ZMD is still preparing the object - pause and retry later
                migrationPlanManager.logWaitingForObject(planId, objectIdentifier);
                return MigrationProcessingResult.PAUSED;
            }
        }
        if (response.getStatusLine().getStatusCode() != HttpStatus.SC_OK) {
            migrationItemManager.logFetchingError(planId, objectIdentifier,
                    response.getStatusLine().getStatusCode() + "");
            throw new MigrationProcessingException(planId,
                    "Could not fetch object from ZMD: " + response.getStatusLine());
        }
        // download the zipped object into a fresh cache folder and unzip it
        workDir = new File(zmkdConfiguration.getWorkingDirectory(uuidGenerator.generateCacheFolderName()));
        workDir.mkdir();
        digitalObjectFile = httpsClientHelper.storeResponseEntity(workDir, response.getEntity(),
                response.getFirstHeader("Content-Disposition"));
        ZipUtility.unzip(digitalObjectFile, workDir);
    } catch (IOException e) {
        migrationItemManager.logFetchingError(planId, objectIdentifier, null);
        throw new MigrationProcessingException(planId, "Could not fetch object from ZMD", e);
    } finally {
        // always release the HTTP entity, even on the PAUSED early return
        if (response != null) {
            EntityUtils.consumeQuietly(response.getEntity());
        }
    }

    DigitalObjectInfo objectInfo = reader.parseMets(workDir, METS_PATH);

    try {
        planExecutionManager.transform(objectInfo, path);
    } catch (TransformationException e) {
        migrationItemManager.logServiceError(planId, objectIdentifier, e.getServiceIri());
        throw new MigrationProcessingException(planId, "Transformation failed", e);
    }

    // map transformed files to object-relative names (stripping the content
    // prefix) and remember each file's sequence number where present
    Map<String, File> targetFiles = new HashMap<String, File>();
    Map<String, Integer> fileSequence = new HashMap<String, Integer>();
    for (DataFileInfo dataFileInfo : objectInfo.getFiles()) {
        String filename = dataFileInfo.getPath();
        if (filename.startsWith(CONTENT_PREFIX)) {
            filename = filename.substring(CONTENT_PREFIX.length());
        }
        targetFiles.put(filename, dataFileInfo.getFile());
        if (dataFileInfo.getSequence() != null) {
            fileSequence.put(filename, dataFileInfo.getSequence());
        }
    }

    // translate the object's type into the migration origin type
    MigrationType originType;
    switch (objectInfo.getType()) {
        case MASTER:
            originType = MigrationType.TRANSFORMATION;
            break;
        case OPTIMIZED:
            originType = MigrationType.OPTIMIZATION;
            break;
        case CONVERTED:
            originType = MigrationType.CONVERSION;
            break;
        default:
            throw new WrdzRuntimeException("Unexpected type: " + objectInfo.getType());
    }

    try {
        String requestId = saveObject(client, targetFiles, fileSequence, objectIdentifier, originType);
        migrationItemManager.logUploaded(planId, objectIdentifier, requestId);
    } catch (IOException e) {
        migrationItemManager.logCreationError(planId, objectIdentifier, null);
        throw new MigrationProcessingException(planId, "Upload failed", e);
    }

    return MigrationProcessingResult.PROCESSED;
}
From source file:org.rhq.enterprise.server.cloud.StatusManagerBean.java
@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW) public void updateByMeasurementBaseline(int baselineId) { log.debug("About to mark status by measurement baseline"); // baselines refer to measurement-based alert conditions, thus only agent statuses need to be set Query updateAgentQuery = entityManager.createNamedQuery(Agent.QUERY_UPDATE_STATUS_BY_MEASUREMENT_BASELINE); updateAgentQuery.setParameter("baselineId", baselineId); updateAgentQuery.executeUpdate();//from ww w .j a v a 2 s . c om /* * this is informational debugging only - do NOT change the status bits here */ if (log.isDebugEnabled()) { MeasurementBaseline baseline = entityManager.find(MeasurementBaseline.class, baselineId); Agent agent = baseline.getSchedule().getResource().getAgent(); log.debug("Marking status, agent[id=" + agent.getId() + ", status=" + agent.getStatus() + "] for measurementBaseline[id=" + baselineId + "]"); } }
From source file:gwap.rest.User.java
/**
 * Registers a new player for the given device id and returns the initial
 * user statistics as JSON. If a person already exists for the device, no
 * account is created and 404 is returned (NOTE(review): 409 Conflict would
 * be the conventional status here - confirm clients rely on 404).
 */
@POST
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
@Transactional
@Path("/{id:[A-Za-z0-9][A-Za-z0-9]*}")
public Response createUser(@PathParam("id") String deviceId, String payloadString) {
    // reject duplicates: a device may only ever own one person row
    Query existingQuery = entityManager.createNamedQuery("person.byDeviceId");
    existingQuery.setParameter("deviceId", deviceId);
    boolean alreadyExists = existingQuery.getResultList().size() > 0;
    if (alreadyExists) {
        log.info("createUser(#0): Person does already exist, not creating again", deviceId);
        return Response.status(Response.Status.NOT_FOUND).build();
    }

    Person person = new Person();
    person.setUsername("");
    person.setDeviceId(deviceId);
    person.setExternalUsername("Neuling");
    person.setLastLogin(new Date());
    entityManager.persist(person);
    entityManager.flush();

    JSONObject userObject = getUserStatistics(deviceId, person);
    log.info("createUser(#0): Person #1 created successfully.", deviceId, person);
    return Response.ok(userObject.toString(), MediaType.APPLICATION_JSON).build();
}
From source file:org.rhq.enterprise.server.bundle.BundleManagerBean.java
/**
 * Appends an audit-history entry to an existing bundle resource deployment.
 * Runs in a new transaction so the audit record commits immediately,
 * independent of the surrounding deployment workflow.
 *
 * @return the history entry that was attached
 * @throws IllegalArgumentException if no resource deployment exists for the id
 */
@Override
@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
@RequiredPermission(Permission.MANAGE_BUNDLE)
public BundleResourceDeploymentHistory addBundleResourceDeploymentHistory(Subject subject,
        int bundleDeploymentId, BundleResourceDeploymentHistory history) throws Exception {

    BundleResourceDeployment resourceDeployment = entityManager.find(BundleResourceDeployment.class,
            bundleDeploymentId);
    if (null == resourceDeployment) {
        throw new IllegalArgumentException("Invalid bundleDeploymentId: " + bundleDeploymentId);
    }

    resourceDeployment.addBundleResourceDeploymentHistory(history);
    this.entityManager.persist(resourceDeployment);
    return history;
}