List of usage examples for javax.persistence.EntityManager.flush()
public void flush();
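Before the examples from real projects below, here is a minimal, self-contained sketch of the basic pattern: flush() forces the persistence context to synchronize pending changes (the queued INSERT here) to the database within the still-open transaction, without committing it. The persistence-unit name "examplePU" and the Account entity are assumptions for illustration only, not taken from the examples on this page.

import javax.persistence.EntityManager;
import javax.persistence.EntityManagerFactory;
import javax.persistence.Persistence;

public void persistAndFlush() {
    // "examplePU" is an assumed persistence-unit name
    EntityManagerFactory emf = Persistence.createEntityManagerFactory("examplePU");
    EntityManager em = emf.createEntityManager();
    try {
        em.getTransaction().begin();

        Account account = new Account(1L, "alice"); // hypothetical mapped @Entity
        em.persist(account);

        // Synchronize the persistence context to the database: the pending
        // INSERT is executed now, inside the open transaction, so queries
        // issued in this transaction can see the row before commit.
        em.flush();

        em.getTransaction().commit();
    } finally {
        em.close();
        emf.close();
    }
}

The same persist-then-flush-then-commit sequence appears in most of the examples below; flushing before commit is chiefly useful to surface constraint violations early or to make pending changes visible to queries within the same transaction.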
From source file:com.sun.socialsite.userapi.UserManagerImpl.java
private UserRole getOrCreateRole(String roleName) throws PersistenceException {
    UserRole role = null;
    try {
        Query query = getNamedQuery("UserRole.findByRoleName");
        query.setParameter("roleName", roleName);
        role = (UserRole) query.getSingleResult();
    } catch (NoResultException nre) {
        // create the role in database
        EntityManager em2 = null;
        try {
            em2 = getEmf().createEntityManager();
            em2.getTransaction().begin();
            UserRole newRole = new UserRole();
            newRole.setRoleName(roleName);
            em2.persist(newRole);
            em2.flush();
            em2.getTransaction().commit();
        } catch (PersistenceException pe) {
            if (em2 == null) {
                // If we couldn't even create an EntityManager, something is clearly wrong
                throw pe;
            } else {
                // Otherwise, ignore exception for now; the role may have been created in another thread
                if (em2.getTransaction().isActive()) {
                    em2.getTransaction().rollback();
                }
            }
        } finally {
            if (em2 != null) {
                em2.close();
            }
        }
    }
    // If role is null, try again (since it _should_ now exist in the DB).
    if (role == null) {
        Query query = getNamedQuery("UserRole.findByRoleName");
        query.setParameter("roleName", roleName);
        role = (UserRole) query.getSingleResult();
    }
    return role;
}
From source file:org.rhq.enterprise.server.content.test.RepoManagerBeanTest.java
@Test(enabled = ENABLED)
public void addRepoRelationship() throws Exception {
    // Setup
    EntityManager entityManager = getEntityManager();

    Repo repo = new Repo("repo1");
    Repo relatedRepo = new Repo("repo2");

    repo = repoManager.createRepo(overlord, repo);
    relatedRepo = repoManager.createRepo(overlord, relatedRepo);

    String relationshipTypeName = "testRelationshipType";
    RepoRelationshipType relationshipType = new RepoRelationshipType(relationshipTypeName);
    entityManager.persist(relationshipType);
    entityManager.flush();

    // Test
    repoManager.addRepoRelationship(overlord, repo.getId(), relatedRepo.getId(), relationshipTypeName);

    // Verify
    RepoCriteria repoCriteria = new RepoCriteria();
    repoCriteria.fetchRepoRepoGroups(true);
    repoCriteria.addFilterId(repo.getId());

    PageList<Repo> repoPageList = repoManager.findReposByCriteria(overlord, repoCriteria);
    assert repoPageList.size() == 1;

    Repo persistedRepo = repoPageList.get(0);
    Set<RepoRepoRelationship> relationships = persistedRepo.getRepoRepoRelationships();
    assert relationships.size() == 1;

    RepoRepoRelationship relationship = relationships.iterator().next();
    assert relationship.getRepoRepoRelationshipPK().getRepo().getName().equals("repo1");
    assert relationship.getRepoRepoRelationshipPK().getRepoRelationship().getRelatedRepo().getName()
            .equals("repo2");
    assert relationship.getRepoRepoRelationshipPK().getRepoRelationship().getRepoRelationshipType().getName()
            .equals(relationshipTypeName);

    // Cleanup handled by rollback in tear down method
}
From source file:org.jasig.portal.events.aggr.PortalRawEventsAggregatorImpl.java
@Override
@AggrEventsTransactional
public EventProcessingResult doCloseAggregations() {
    if (!this.clusterLockService.isLockOwner(AGGREGATION_LOCK_NAME)) {
        throw new IllegalStateException("The cluster lock " + AGGREGATION_LOCK_NAME
                + " must be owned by the current thread and server");
    }

    final IEventAggregatorStatus cleanUnclosedStatus = eventAggregationManagementDao
            .getEventAggregatorStatus(ProcessingType.CLEAN_UNCLOSED, true);

    //Update status with current server name
    final String serverName = this.portalInfoProvider.getUniqueServerName();
    cleanUnclosedStatus.setServerName(serverName);
    cleanUnclosedStatus.setLastStart(new DateTime());

    //Determine date of most recently aggregated data
    final IEventAggregatorStatus eventAggregatorStatus = eventAggregationManagementDao
            .getEventAggregatorStatus(ProcessingType.AGGREGATION, false);
    if (eventAggregatorStatus == null || eventAggregatorStatus.getLastEventDate() == null) {
        //Nothing has been aggregated, skip unclosed cleanup
        cleanUnclosedStatus.setLastEnd(new DateTime());
        eventAggregationManagementDao.updateEventAggregatorStatus(cleanUnclosedStatus);
        return new EventProcessingResult(0, null, null, true);
    }

    final DateTime lastAggregatedDate = eventAggregatorStatus.getLastEventDate();

    //If lastCleanUnclosedDate is null use the oldest date dimension as there can be
    //no aggregations that exist before it
    final DateTime lastCleanUnclosedDate;
    if (cleanUnclosedStatus.getLastEventDate() == null) {
        final DateDimension oldestDateDimension = this.dateDimensionDao.getOldestDateDimension();
        lastCleanUnclosedDate = oldestDateDimension.getDate().toDateTime();
    } else {
        lastCleanUnclosedDate = cleanUnclosedStatus.getLastEventDate();
    }

    if (!(lastCleanUnclosedDate.isBefore(lastAggregatedDate))) {
        logger.debug("No events aggregated since last unclosed aggregation cleaning, skipping clean: {}",
                lastAggregatedDate);
        return new EventProcessingResult(0, lastCleanUnclosedDate, lastAggregatedDate, true);
    }

    //Switch to flush on commit to avoid flushes during queries
    final EntityManager entityManager = this.getEntityManager();
    entityManager.flush();
    entityManager.setFlushMode(FlushModeType.COMMIT);

    //Track the number of closed aggregations and the last date of a cleaned interval
    int closedAggregations = 0;
    int cleanedIntervals = 0;
    DateTime cleanUnclosedEnd;

    final Thread currentThread = Thread.currentThread();
    final String currentName = currentThread.getName();
    try {
        currentThread.setName(currentName + "-" + lastCleanUnclosedDate + "-" + lastAggregatedDate);

        //Local caches used to reduce db io
        final IntervalsForAggregatorHelper intervalsForAggregatorHelper = new IntervalsForAggregatorHelper();
        final Map<AggregationInterval, AggregationIntervalInfo> previousIntervals =
                new HashMap<AggregationInterval, AggregationIntervalInfo>();

        //A DateTime within the next interval to close aggregations in
        DateTime nextIntervalDate = lastCleanUnclosedDate;
        do {
            //Reset our goal of catching up to the last aggregated event on every iteration
            cleanUnclosedEnd = lastAggregatedDate;

            //For each interval the aggregator supports, cleanup the unclosed aggregations
            for (final AggregationInterval interval : intervalsForAggregatorHelper.getHandledIntervals()) {
                final AggregationIntervalInfo previousInterval = previousIntervals.get(interval);
                if (previousInterval != null && nextIntervalDate.isBefore(previousInterval.getEnd())) {
                    logger.debug(
                            "{} interval before {} has already been cleaned during this execution, ignoring",
                            interval, previousInterval.getEnd());
                    continue;
                }

                //The END date of the last clean session will find us the next interval to clean
                final AggregationIntervalInfo nextIntervalToClean = intervalHelper.getIntervalInfo(interval,
                        nextIntervalDate);
                previousIntervals.put(interval, nextIntervalToClean);
                if (nextIntervalToClean == null) {
                    continue;
                }

                final DateTime start = nextIntervalToClean.getStart();
                final DateTime end = nextIntervalToClean.getEnd();
                if (!end.isBefore(lastAggregatedDate)) {
                    logger.debug("{} interval between {} and {} is still active, ignoring",
                            new Object[] { interval, start, end });
                    continue;
                }

                //Track the oldest interval end, this ensures that nothing is missed
                if (end.isBefore(cleanUnclosedEnd)) {
                    cleanUnclosedEnd = end;
                }

                logger.debug("Cleaning unclosed {} aggregations between {} and {}",
                        new Object[] { interval, start, end });

                for (final IPortalEventAggregator<PortalEvent> portalEventAggregator : portalEventAggregators) {
                    checkShutdown();

                    final Class<? extends IPortalEventAggregator<?>> aggregatorType = getClass(
                            portalEventAggregator);

                    //Get aggregator specific interval info config
                    final AggregatedIntervalConfig aggregatorIntervalConfig = intervalsForAggregatorHelper
                            .getAggregatorIntervalConfig(aggregatorType);

                    //If the aggregator is being used for the specified interval call cleanUnclosedAggregations
                    if (aggregatorIntervalConfig.isIncluded(interval)) {
                        closedAggregations += portalEventAggregator.cleanUnclosedAggregations(start, end,
                                interval);
                    }
                }

                cleanedIntervals++;
            }

            //Set the next interval to the end date from the last aggregation run
            nextIntervalDate = cleanUnclosedEnd;

            logger.debug("Closed {} aggregations across {} interval before {} with goal of {}",
                    new Object[] { closedAggregations, cleanedIntervals, cleanUnclosedEnd, lastAggregatedDate });
            //Loop until either the batchSize of cleaned aggregations has been reached or no aggregation work is done
        } while (closedAggregations <= cleanUnclosedAggregationsBatchSize
                && cleanedIntervals <= cleanUnclosedIntervalsBatchSize
                && cleanUnclosedEnd.isBefore(lastAggregatedDate));
    } finally {
        currentThread.setName(currentName);
    }

    //Update the status object and store it
    cleanUnclosedStatus.setLastEventDate(cleanUnclosedEnd);
    cleanUnclosedStatus.setLastEnd(new DateTime());
    eventAggregationManagementDao.updateEventAggregatorStatus(cleanUnclosedStatus);

    return new EventProcessingResult(closedAggregations, lastCleanUnclosedDate, lastAggregatedDate,
            !cleanUnclosedEnd.isBefore(lastAggregatedDate));
}
From source file:org.eclipse.jubula.client.core.persistence.ProjectPM.java
/**
 * Persists the given project to the DB. This is performed in a new session.
 * When this method returns, the project will not be attached to any session.
 * @param proj ProjectPO to be saved.
 * @param newProjectName
 *            name part of the ProjectNamePO. If there is no new name, this
 *            parameter must be null (same project, different version)
 * @param mapperList a List of INameMapper to persist names (Parameter).
 * @param compNameBindingList a List of Component Name mappers to persist
 *            names (Component).
 * @throws PMException in case of any db error
 * @throws ProjectDeletedException if project is already deleted
 * @throws InterruptedException if the operation is canceled
 */
public static void saveProject(IProjectPO proj, String newProjectName, List<INameMapper> mapperList,
        List<IWritableComponentNameMapper> compNameBindingList)
        throws PMException, ProjectDeletedException, InterruptedException {
    final EntityManager saveSession = Persistor.instance().openSession();
    EntityTransaction tx = null;
    try {
        tx = Persistor.instance().getTransaction(saveSession);
        saveSession.persist(proj);
        proj.setParentProjectId(proj.getId());
        saveSession.flush();
        if (newProjectName != null) {
            ProjectNameBP.getInstance().setName(saveSession, proj.getGuid(), newProjectName);
        }
        ProjectNameBP.getInstance().storeTransientNames(saveSession);
        for (INameMapper mapper : mapperList) {
            mapper.persist(saveSession, proj.getId());
        }
        for (IWritableComponentNameMapper compNameBinding : compNameBindingList) {
            CompNamePM.flushCompNames(saveSession, proj.getId(), compNameBinding);
        }
        Persistor.instance().commitTransaction(saveSession, tx);
        for (INameMapper mapper : mapperList) {
            mapper.updateStandardMapperAndCleanup(proj.getId());
        }
        for (IComponentNameMapper compNameCache : compNameBindingList) {
            compNameCache.getCompNameCache().updateStandardMapperAndCleanup(proj.getId());
        }
    } catch (PersistenceException e) {
        if (tx != null) {
            Persistor.instance().rollbackTransaction(saveSession, tx);
        }
        if (e.getCause() instanceof InterruptedException) {
            // Operation was canceled.
            throw new InterruptedException();
        }
        String msg = Messages.CantSaveProject + StringConstants.DOT;
        throw new PMSaveException(msg + e.getMessage(), MessageIDs.E_ATTACH_PROJECT);
    } catch (IncompatibleTypeException ite) {
        if (tx != null) {
            Persistor.instance().rollbackTransaction(saveSession, tx);
        }
        String msg = Messages.CantSaveProject + StringConstants.DOT;
        throw new PMSaveException(msg + ite.getMessage(), MessageIDs.E_ATTACH_PROJECT);
    } finally {
        Persistor.instance().dropSession(saveSession);
    }
}
From source file:gr.upatras.ece.nam.baker.impl.BakerJpaController.java
public void saveProperty(BakerProperty p) {
    logger.info("Will save BakerProperty = " + p.getName());
    EntityManager entityManager = entityManagerFactory.createEntityManager();
    EntityTransaction entityTransaction = entityManager.getTransaction();
    entityTransaction.begin();
    entityManager.persist(p);
    entityManager.flush();
    entityTransaction.commit();
}
From source file:gr.upatras.ece.nam.baker.impl.BakerJpaController.java
public void saveInstalledBun(InstalledBun is) {
    logger.info("Will create InstalledBun = " + is.getUuid());
    EntityManager entityManager = entityManagerFactory.createEntityManager();
    EntityTransaction entityTransaction = entityManager.getTransaction();
    entityTransaction.begin();
    entityManager.persist(is);
    entityManager.flush();
    entityTransaction.commit();
}
From source file:gr.upatras.ece.nam.baker.impl.BakerJpaController.java
public void saveSubscribedResource(SubscribedResource sm) {
    logger.info("Will save SubscribedResource = " + sm.getURL());
    EntityManager entityManager = entityManagerFactory.createEntityManager();
    EntityTransaction entityTransaction = entityManager.getTransaction();
    entityTransaction.begin();
    entityManager.persist(sm);
    entityManager.flush();
    entityTransaction.commit();
}
From source file:org.apereo.portal.events.aggr.PortalRawEventsAggregatorImpl.java
@Override
@AggrEventsTransactional
public EventProcessingResult doCloseAggregations() {
    if (!this.clusterLockService.isLockOwner(AGGREGATION_LOCK_NAME)) {
        throw new IllegalStateException("The cluster lock " + AGGREGATION_LOCK_NAME
                + " must be owned by the current thread and server");
    }

    final IEventAggregatorStatus cleanUnclosedStatus = eventAggregationManagementDao
            .getEventAggregatorStatus(IEventAggregatorStatus.ProcessingType.CLEAN_UNCLOSED, true);

    //Update status with current server name
    final String serverName = this.portalInfoProvider.getUniqueServerName();
    cleanUnclosedStatus.setServerName(serverName);
    cleanUnclosedStatus.setLastStart(new DateTime());

    //Determine date of most recently aggregated data
    final IEventAggregatorStatus eventAggregatorStatus = eventAggregationManagementDao
            .getEventAggregatorStatus(IEventAggregatorStatus.ProcessingType.AGGREGATION, false);
    if (eventAggregatorStatus == null || eventAggregatorStatus.getLastEventDate() == null) {
        //Nothing has been aggregated, skip unclosed cleanup
        cleanUnclosedStatus.setLastEnd(new DateTime());
        eventAggregationManagementDao.updateEventAggregatorStatus(cleanUnclosedStatus);
        return new EventProcessingResult(0, null, null, true);
    }

    final DateTime lastAggregatedDate = eventAggregatorStatus.getLastEventDate();

    //If lastCleanUnclosedDate is null use the oldest date dimension as there can be
    //no aggregations that exist before it
    final DateTime lastCleanUnclosedDate;
    if (cleanUnclosedStatus.getLastEventDate() == null) {
        final DateDimension oldestDateDimension = this.dateDimensionDao.getOldestDateDimension();
        lastCleanUnclosedDate = oldestDateDimension.getDate().toDateTime();
    } else {
        lastCleanUnclosedDate = cleanUnclosedStatus.getLastEventDate();
    }

    if (!(lastCleanUnclosedDate.isBefore(lastAggregatedDate))) {
        logger.debug("No events aggregated since last unclosed aggregation cleaning, skipping clean: {}",
                lastAggregatedDate);
        return new EventProcessingResult(0, lastCleanUnclosedDate, lastAggregatedDate, true);
    }

    //Switch to flush on commit to avoid flushes during queries
    final EntityManager entityManager = this.getEntityManager();
    entityManager.flush();
    entityManager.setFlushMode(FlushModeType.COMMIT);

    //Track the number of closed aggregations and the last date of a cleaned interval
    int closedAggregations = 0;
    int cleanedIntervals = 0;
    DateTime cleanUnclosedEnd;

    final Thread currentThread = Thread.currentThread();
    final String currentName = currentThread.getName();
    try {
        currentThread.setName(currentName + "-" + lastCleanUnclosedDate + "-" + lastAggregatedDate);

        //Local caches used to reduce db io
        final IntervalsForAggregatorHelper intervalsForAggregatorHelper = new IntervalsForAggregatorHelper();
        final Map<AggregationInterval, AggregationIntervalInfo> previousIntervals =
                new HashMap<AggregationInterval, AggregationIntervalInfo>();

        //A DateTime within the next interval to close aggregations in
        DateTime nextIntervalDate = lastCleanUnclosedDate;
        do {
            //Reset our goal of catching up to the last aggregated event on every iteration
            cleanUnclosedEnd = lastAggregatedDate;

            //For each interval the aggregator supports, cleanup the unclosed aggregations
            for (final AggregationInterval interval : intervalsForAggregatorHelper.getHandledIntervals()) {
                final AggregationIntervalInfo previousInterval = previousIntervals.get(interval);
                if (previousInterval != null && nextIntervalDate.isBefore(previousInterval.getEnd())) {
                    logger.debug(
                            "{} interval before {} has already been cleaned during this execution, ignoring",
                            interval, previousInterval.getEnd());
                    continue;
                }

                //The END date of the last clean session will find us the next interval to clean
                final AggregationIntervalInfo nextIntervalToClean = intervalHelper.getIntervalInfo(interval,
                        nextIntervalDate);
                previousIntervals.put(interval, nextIntervalToClean);
                if (nextIntervalToClean == null) {
                    continue;
                }

                final DateTime start = nextIntervalToClean.getStart();
                final DateTime end = nextIntervalToClean.getEnd();
                if (!end.isBefore(lastAggregatedDate)) {
                    logger.debug("{} interval between {} and {} is still active, ignoring",
                            new Object[] { interval, start, end });
                    continue;
                }

                //Track the oldest interval end, this ensures that nothing is missed
                if (end.isBefore(cleanUnclosedEnd)) {
                    cleanUnclosedEnd = end;
                }

                logger.debug("Cleaning unclosed {} aggregations between {} and {}",
                        new Object[] { interval, start, end });

                for (final IntervalAwarePortalEventAggregator<PortalEvent> portalEventAggregator : intervalAwarePortalEventAggregators) {
                    checkShutdown();

                    final Class<? extends IPortalEventAggregator<?>> aggregatorType = getClass(
                            portalEventAggregator);

                    //Get aggregator specific interval info config
                    final AggregatedIntervalConfig aggregatorIntervalConfig = intervalsForAggregatorHelper
                            .getAggregatorIntervalConfig(aggregatorType);

                    //If the aggregator is being used for the specified interval call cleanUnclosedAggregations
                    if (aggregatorIntervalConfig.isIncluded(interval)) {
                        closedAggregations += portalEventAggregator.cleanUnclosedAggregations(start, end,
                                interval);
                    }
                }

                cleanedIntervals++;
            }

            //Set the next interval to the end date from the last aggregation run
            nextIntervalDate = cleanUnclosedEnd;

            logger.debug("Closed {} aggregations across {} interval before {} with goal of {}",
                    new Object[] { closedAggregations, cleanedIntervals, cleanUnclosedEnd, lastAggregatedDate });
            //Loop until either the batchSize of cleaned aggregations has been reached or no aggregation work is done
        } while (closedAggregations <= cleanUnclosedAggregationsBatchSize
                && cleanedIntervals <= cleanUnclosedIntervalsBatchSize
                && cleanUnclosedEnd.isBefore(lastAggregatedDate));
    } finally {
        currentThread.setName(currentName);
    }

    //Update the status object and store it
    cleanUnclosedStatus.setLastEventDate(cleanUnclosedEnd);
    cleanUnclosedStatus.setLastEnd(new DateTime());
    eventAggregationManagementDao.updateEventAggregatorStatus(cleanUnclosedStatus);

    return new EventProcessingResult(closedAggregations, lastCleanUnclosedDate, lastAggregatedDate,
            !cleanUnclosedEnd.isBefore(lastAggregatedDate));
}
From source file:gr.upatras.ece.nam.baker.impl.BakerJpaController.java
public void saveUser(BakerUser bu) {
    logger.info("Will save BakerUser = " + bu.getName());
    EntityManager entityManager = entityManagerFactory.createEntityManager();
    EntityTransaction entityTransaction = entityManager.getTransaction();
    entityTransaction.begin();
    entityManager.persist(bu);
    entityManager.flush();
    entityTransaction.commit();
}
From source file:org.mule.module.jpa.command.Detach.java
public Object execute(EntityManager entityManager, Object entity, Map<String, Object> parameters,
        Boolean flush) throws Exception {
    logger.debug("Detaching entity: " + entity);
    entityManager.detach(entity);
    if (flush) {
        entityManager.flush();
    }
    return entity;
}