List of usage examples for javax.persistence.EntityManager.setFlushMode
public void setFlushMode(FlushModeType flushMode);
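Before the project examples, a minimal self-contained sketch of the call itself (the persistence unit name "example-pu" is a placeholder, not taken from any of the sources below). setFlushMode switches the persistence context from the default FlushModeType.AUTO, where pending changes may be flushed before a query executes, to FlushModeType.COMMIT, where they are flushed only at transaction commit or on an explicit flush():

import javax.persistence.EntityManager;
import javax.persistence.EntityManagerFactory;
import javax.persistence.FlushModeType;
import javax.persistence.Persistence;

public class SetFlushModeSketch {
    public static void main(String[] args) {
        // "example-pu" is a placeholder; use a unit defined in your persistence.xml
        EntityManagerFactory emf = Persistence.createEntityManagerFactory("example-pu");
        EntityManager em = emf.createEntityManager();
        try {
            // Defer automatic flushing: queries no longer force a flush
            em.setFlushMode(FlushModeType.COMMIT);
            em.getTransaction().begin();
            // ... persist/remove/query work here ...
            em.getTransaction().commit(); // pending changes are flushed here
        } finally {
            em.close();
            emf.close();
        }
    }
}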
From source file:com.hiperf.common.ui.server.storage.impl.PersistenceHelper.java
@Override
public EntityManager getEntityManager() {
    // Reuse the EntityManager cached for this thread if it is still open
    EntityManager em = emByThread.get();
    if (em == null || !em.isOpen()) {
        em = emf.createEntityManager();
        // Defer automatic flushing until transaction commit
        em.setFlushMode(FlushModeType.COMMIT);
        emByThread.set(em);
    }
    return em;
}
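Because the flush mode is set once when the EntityManager is created, every caller that later retrieves the cached, thread-local instance from this helper inherits COMMIT semantics: queries do not trigger automatic flushes, and changes reach the database only at transaction commit or on an explicit flush().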
From source file:org.apache.oozie.test.XTestCase.java
private void cleanUpDBTablesInternal() throws StoreException {
    EntityManager entityManager = Services.get().get(JPAService.class).getEntityManager();
    // Defer flushes to commit so the removals below do not force a flush before each query
    entityManager.setFlushMode(FlushModeType.COMMIT);
    entityManager.getTransaction().begin();

    // Delete every row of each table, keeping counts for the summary log
    Query q = entityManager.createNamedQuery("GET_WORKFLOWS");
    List<WorkflowJobBean> wfjBeans = q.getResultList();
    int wfjSize = wfjBeans.size();
    for (WorkflowJobBean w : wfjBeans) {
        entityManager.remove(w);
    }

    q = entityManager.createNamedQuery("GET_ACTIONS");
    List<WorkflowActionBean> wfaBeans = q.getResultList();
    int wfaSize = wfaBeans.size();
    for (WorkflowActionBean w : wfaBeans) {
        entityManager.remove(w);
    }

    q = entityManager.createNamedQuery("GET_COORD_JOBS");
    List<CoordinatorJobBean> cojBeans = q.getResultList();
    int cojSize = cojBeans.size();
    for (CoordinatorJobBean w : cojBeans) {
        entityManager.remove(w);
    }

    q = entityManager.createNamedQuery("GET_COORD_ACTIONS");
    List<CoordinatorActionBean> coaBeans = q.getResultList();
    int coaSize = coaBeans.size();
    for (CoordinatorActionBean w : coaBeans) {
        entityManager.remove(w);
    }

    q = entityManager.createNamedQuery("GET_BUNDLE_JOBS");
    List<BundleJobBean> bjBeans = q.getResultList();
    int bjSize = bjBeans.size();
    for (BundleJobBean w : bjBeans) {
        entityManager.remove(w);
    }

    q = entityManager.createNamedQuery("GET_BUNDLE_ACTIONS");
    List<BundleActionBean> baBeans = q.getResultList();
    int baSize = baBeans.size();
    for (BundleActionBean w : baBeans) {
        entityManager.remove(w);
    }

    q = entityManager.createNamedQuery("GET_SLA_EVENTS");
    List<SLAEventBean> slaBeans = q.getResultList();
    int slaSize = slaBeans.size();
    for (SLAEventBean w : slaBeans) {
        entityManager.remove(w);
    }

    q = entityManager.createQuery("select OBJECT(w) from SLARegistrationBean w");
    List<SLARegistrationBean> slaRegBeans = q.getResultList();
    int slaRegSize = slaRegBeans.size();
    for (SLARegistrationBean w : slaRegBeans) {
        entityManager.remove(w);
    }

    q = entityManager.createQuery("select OBJECT(w) from SLASummaryBean w");
    List<SLASummaryBean> sdBeans = q.getResultList();
    int ssSize = sdBeans.size();
    for (SLASummaryBean w : sdBeans) {
        entityManager.remove(w);
    }

    // All deletions are written to the database once, here at commit
    entityManager.getTransaction().commit();
    entityManager.close();

    log.info(wfjSize + " entries in WF_JOBS removed from DB!");
    log.info(wfaSize + " entries in WF_ACTIONS removed from DB!");
    log.info(cojSize + " entries in COORD_JOBS removed from DB!");
    log.info(coaSize + " entries in COORD_ACTIONS removed from DB!");
    log.info(bjSize + " entries in BUNDLE_JOBS removed from DB!");
    log.info(baSize + " entries in BUNDLE_ACTIONS removed from DB!");
    log.info(slaSize + " entries in SLA_EVENTS removed from DB!");
    log.info(slaRegSize + " entries in SLA_REGISTRATION removed from DB!");
    log.info(ssSize + " entries in SLA_SUMMARY removed from DB!");
}
From source file:org.apache.oozie.tools.OozieDBExportCLI.java
private static void queryAllDBTables(String filename) throws StoreException, IOException {
    EntityManager manager = null;
    ZipOutputStream zos = null;
    File file = null;
    try {
        file = new File(filename);
        zos = new ZipOutputStream(new FileOutputStream(file));
        zos.setLevel(1);
        manager = Services.get().get(JPAService.class).getEntityManager();
        // The export only reads; COMMIT mode avoids automatic flushes before each query
        manager.setFlushMode(FlushModeType.COMMIT);

        int infoSize = exportTableToJSON(manager.createNativeQuery(GET_DB_VERSION), zos, OOZIEDB_SYS_INFO_JSON);
        System.out.println(infoSize + " rows exported from OOZIE_SYS");

        int wfjSize = exportTableToJSON(manager.createQuery(GET_WORKFLOW_JOBS), zos, OOZIEDB_WF_JSON);
        System.out.println(wfjSize + " rows exported from WF_JOBS");

        int wfaSize = exportTableToJSON(manager.createQuery(GET_WORKFLOW_ACTIONS), zos, OOZIEDB_AC_JSON);
        System.out.println(wfaSize + " rows exported from WF_ACTIONS");

        int cojSize = exportTableToJSON(manager.createQuery(GET_COORD_JOBS), zos, OOZIEDB_CJ_JSON);
        System.out.println(cojSize + " rows exported from COORD_JOBS");

        int coaSize = exportTableToJSON(manager.createQuery(GET_COORD_ACTIONS), zos, OOZIEDB_CA_JSON);
        System.out.println(coaSize + " rows exported from COORD_ACTIONS");

        int bnjSize = exportTableToJSON(manager.createQuery(GET_BUNDLE_JOBS), zos, OOZIEDB_BNJ_JSON);
        System.out.println(bnjSize + " rows exported from BUNDLE_JOBS");

        int bnaSize = exportTableToJSON(manager.createQuery(GET_BUNDLE_ACIONS), zos, OOZIEDB_BNA_JSON);
        System.out.println(bnaSize + " rows exported from BUNDLE_ACTIONS");

        int slaRegSize = exportTableToJSON(manager.createQuery(GET_SLA_REGISTRATIONS), zos, OOZIEDB_SLAREG_JSON);
        System.out.println(slaRegSize + " rows exported from SLA_REGISTRATION");

        int ssSize = exportTableToJSON(manager.createQuery(GET_SLA_SUMMARYS), zos, OOZIEDB_SLASUM_JSON);
        System.out.println(ssSize + " rows exported from SLA_SUMMARY");
    } catch (Exception e) {
        System.err.println("Error during dump creation: " + e.getMessage());
        System.err.println();
        e.printStackTrace(System.err);
        System.err.println();
        if (file != null) {
            file.delete();
        }
        System.exit(1);
    } finally {
        IOUtils.closeSafely(zos);
        if (manager != null) {
            manager.close();
        }
    }
}
From source file:org.apache.oozie.tools.OozieDBImportCLI.java
private static void importAllDBTables(String zipFileName)
        throws StoreException, IOException, JPAExecutorException {
    EntityManager entityManager = null;
    ZipFile zipFile = null;
    try {
        entityManager = Services.get().get(JPAService.class).getEntityManager();
        // Defer automatic flushing until commit while rows are imported
        entityManager.setFlushMode(FlushModeType.COMMIT);
        zipFile = new ZipFile(zipFileName);
        checkDBVersion(entityManager, zipFile);
        importFrom(entityManager, zipFile, "WF_JOBS", WorkflowJobBean.class, OOZIEDB_WF_JSON);
        importFrom(entityManager, zipFile, "WF_ACTIONS", WorkflowActionBean.class, OOZIEDB_AC_JSON);
        importFrom(entityManager, zipFile, "COORD_JOBS", CoordinatorJobBean.class, OOZIEDB_CJ_JSON);
        importFrom(entityManager, zipFile, "COORD_ACTIONS", CoordinatorActionBean.class, OOZIEDB_CA_JSON);
        importFrom(entityManager, zipFile, "BUNDLE_JOBS", BundleJobBean.class, OOZIEDB_BNJ_JSON);
        importFrom(entityManager, zipFile, "BUNDLE_ACTIONS", BundleActionBean.class, OOZIEDB_BNA_JSON);
        importFrom(entityManager, zipFile, "SLA_REGISTRATION", SLARegistrationBean.class, OOZIEDB_SLAREG_JSON);
        importFrom(entityManager, zipFile, "SLA_SUMMARY", SLASummaryBean.class, OOZIEDB_SLASUM_JSON);
    } finally {
        if (entityManager != null) {
            entityManager.close();
        }
        if (zipFile != null) {
            zipFile.close();
        }
    }
}
From source file:org.apereo.portal.events.aggr.PortalRawEventsAggregatorImpl.java
@Override
@AggrEventsTransactional
public EventProcessingResult doCloseAggregations() {
    if (!this.clusterLockService.isLockOwner(AGGREGATION_LOCK_NAME)) {
        throw new IllegalStateException("The cluster lock " + AGGREGATION_LOCK_NAME
                + " must be owned by the current thread and server");
    }

    final IEventAggregatorStatus cleanUnclosedStatus = eventAggregationManagementDao
            .getEventAggregatorStatus(IEventAggregatorStatus.ProcessingType.CLEAN_UNCLOSED, true);

    //Update status with current server name
    final String serverName = this.portalInfoProvider.getUniqueServerName();
    cleanUnclosedStatus.setServerName(serverName);
    cleanUnclosedStatus.setLastStart(new DateTime());

    //Determine date of most recently aggregated data
    final IEventAggregatorStatus eventAggregatorStatus = eventAggregationManagementDao
            .getEventAggregatorStatus(IEventAggregatorStatus.ProcessingType.AGGREGATION, false);
    if (eventAggregatorStatus == null || eventAggregatorStatus.getLastEventDate() == null) {
        //Nothing has been aggregated, skip unclosed cleanup
        cleanUnclosedStatus.setLastEnd(new DateTime());
        eventAggregationManagementDao.updateEventAggregatorStatus(cleanUnclosedStatus);
        return new EventProcessingResult(0, null, null, true);
    }

    final DateTime lastAggregatedDate = eventAggregatorStatus.getLastEventDate();

    //If lastCleanUnclosedDate is null use the oldest date dimension as there can be
    //no aggregations that exist before it
    final DateTime lastCleanUnclosedDate;
    if (cleanUnclosedStatus.getLastEventDate() == null) {
        final DateDimension oldestDateDimension = this.dateDimensionDao.getOldestDateDimension();
        lastCleanUnclosedDate = oldestDateDimension.getDate().toDateTime();
    } else {
        lastCleanUnclosedDate = cleanUnclosedStatus.getLastEventDate();
    }

    if (!(lastCleanUnclosedDate.isBefore(lastAggregatedDate))) {
        logger.debug("No events aggregated since last unclosed aggregation cleaning, skipping clean: {}",
                lastAggregatedDate);
        return new EventProcessingResult(0, lastCleanUnclosedDate, lastAggregatedDate, true);
    }

    //Switch to flush on commit to avoid flushes during queries
    final EntityManager entityManager = this.getEntityManager();
    entityManager.flush();
    entityManager.setFlushMode(FlushModeType.COMMIT);

    //Track the number of closed aggregations and the last date of a cleaned interval
    int closedAggregations = 0;
    int cleanedIntervals = 0;
    DateTime cleanUnclosedEnd;

    final Thread currentThread = Thread.currentThread();
    final String currentName = currentThread.getName();
    try {
        currentThread.setName(currentName + "-" + lastCleanUnclosedDate + "-" + lastAggregatedDate);

        //Local caches used to reduce db io
        final IntervalsForAggregatorHelper intervalsForAggregatorHelper = new IntervalsForAggregatorHelper();
        final Map<AggregationInterval, AggregationIntervalInfo> previousIntervals = new HashMap<AggregationInterval, AggregationIntervalInfo>();

        //A DateTime within the next interval to close aggregations in
        DateTime nextIntervalDate = lastCleanUnclosedDate;
        do {
            //Reset our goal of catching up to the last aggregated event on every iteration
            cleanUnclosedEnd = lastAggregatedDate;

            //For each interval the aggregator supports, cleanup the unclosed aggregations
            for (final AggregationInterval interval : intervalsForAggregatorHelper.getHandledIntervals()) {
                final AggregationIntervalInfo previousInterval = previousIntervals.get(interval);
                if (previousInterval != null && nextIntervalDate.isBefore(previousInterval.getEnd())) {
                    logger.debug(
                            "{} interval before {} has already been cleaned during this execution, ignoring",
                            interval, previousInterval.getEnd());
                    continue;
                }

                //The END date of the last clean session will find us the next interval to clean
                final AggregationIntervalInfo nextIntervalToClean = intervalHelper.getIntervalInfo(interval,
                        nextIntervalDate);
                previousIntervals.put(interval, nextIntervalToClean);
                if (nextIntervalToClean == null) {
                    continue;
                }

                final DateTime start = nextIntervalToClean.getStart();
                final DateTime end = nextIntervalToClean.getEnd();
                if (!end.isBefore(lastAggregatedDate)) {
                    logger.debug("{} interval between {} and {} is still active, ignoring",
                            new Object[] { interval, start, end });
                    continue;
                }

                //Track the oldest interval end, this ensures that nothing is missed
                if (end.isBefore(cleanUnclosedEnd)) {
                    cleanUnclosedEnd = end;
                }

                logger.debug("Cleaning unclosed {} aggregations between {} and {}",
                        new Object[] { interval, start, end });
                for (final IntervalAwarePortalEventAggregator<PortalEvent> portalEventAggregator : intervalAwarePortalEventAggregators) {
                    checkShutdown();

                    final Class<? extends IPortalEventAggregator<?>> aggregatorType = getClass(portalEventAggregator);

                    //Get aggregator specific interval info config
                    final AggregatedIntervalConfig aggregatorIntervalConfig = intervalsForAggregatorHelper
                            .getAggregatorIntervalConfig(aggregatorType);

                    //If the aggregator is being used for the specified interval call cleanUnclosedAggregations
                    if (aggregatorIntervalConfig.isIncluded(interval)) {
                        closedAggregations += portalEventAggregator.cleanUnclosedAggregations(start, end, interval);
                    }
                }
                cleanedIntervals++;
            }

            //Set the next interval to the end date from the last aggregation run
            nextIntervalDate = cleanUnclosedEnd;

            logger.debug("Closed {} aggregations across {} interval before {} with goal of {}",
                    new Object[] { closedAggregations, cleanedIntervals, cleanUnclosedEnd, lastAggregatedDate });
            //Loop until either the batchSize of cleaned aggregations has been reached or no aggregation work is done
        } while (closedAggregations <= cleanUnclosedAggregationsBatchSize
                && cleanedIntervals <= cleanUnclosedIntervalsBatchSize
                && cleanUnclosedEnd.isBefore(lastAggregatedDate));
    } finally {
        currentThread.setName(currentName);
    }

    //Update the status object and store it
    cleanUnclosedStatus.setLastEventDate(cleanUnclosedEnd);
    cleanUnclosedStatus.setLastEnd(new DateTime());
    eventAggregationManagementDao.updateEventAggregatorStatus(cleanUnclosedStatus);

    return new EventProcessingResult(closedAggregations, lastCleanUnclosedDate, lastAggregatedDate,
            !cleanUnclosedEnd.isBefore(lastAggregatedDate));
}
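Note the idiom above: an explicit flush() immediately before setFlushMode(FlushModeType.COMMIT) pushes any pending changes to the database first, so switching to COMMIT mode cannot leave earlier modifications invisible to the many interval queries that follow. A minimal sketch of the same idiom (em is a hypothetical EntityManager already inside a transaction):

em.flush();                             // write pending changes now
em.setFlushMode(FlushModeType.COMMIT);  // defer further automatic flushes to commit
// ... run many queries without per-query flush overhead ...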
From source file:org.apereo.portal.events.aggr.PortalRawEventsAggregatorImpl.java
private EventProcessingResult doAggregateRawEventsInternal() {
    if (!this.clusterLockService.isLockOwner(AGGREGATION_LOCK_NAME)) {
        throw new IllegalStateException("The cluster lock " + AGGREGATION_LOCK_NAME
                + " must be owned by the current thread and server");
    }

    if (!this.portalEventDimensionPopulator.isCheckedDimensions()) {
        //First time aggregation has happened, run populateDimensions to ensure enough dimension data exists
        final boolean populatedDimensions = this.portalEventAggregationManager.populateDimensions();
        if (!populatedDimensions) {
            this.logger.warn(
                    "Aborting raw event aggregation, populateDimensions returned false so the state of date/time dimensions is unknown");
            return null;
        }
    }

    //Flush any dimension creation before aggregation
    final EntityManager entityManager = this.getEntityManager();
    entityManager.flush();
    entityManager.setFlushMode(FlushModeType.COMMIT);

    final IEventAggregatorStatus eventAggregatorStatus = eventAggregationManagementDao
            .getEventAggregatorStatus(IEventAggregatorStatus.ProcessingType.AGGREGATION, true);

    //Update status with current server name
    final String serverName = this.portalInfoProvider.getUniqueServerName();
    final String previousServerName = eventAggregatorStatus.getServerName();
    if (previousServerName != null && !serverName.equals(previousServerName)) {
        this.logger.debug("Last aggregation run on {} clearing all aggregation caches", previousServerName);
        final Session session = getEntityManager().unwrap(Session.class);
        final Cache cache = session.getSessionFactory().getCache();
        cache.evictEntityRegions();
    }

    eventAggregatorStatus.setServerName(serverName);

    //Calculate date range for aggregation
    DateTime lastAggregated = eventAggregatorStatus.getLastEventDate();
    if (lastAggregated == null) {
        lastAggregated = portalEventDao.getOldestPortalEventTimestamp();

        //No portal events to aggregate, skip aggregation
        if (lastAggregated == null) {
            return new EventProcessingResult(0, null, null, true);
        }

        //First time aggregation has run, initialize the CLEAN_UNCLOSED status to save catch-up time
        final IEventAggregatorStatus cleanUnclosedStatus = eventAggregationManagementDao
                .getEventAggregatorStatus(IEventAggregatorStatus.ProcessingType.CLEAN_UNCLOSED, true);
        AggregationIntervalInfo oldestMinuteInterval = this.intervalHelper
                .getIntervalInfo(AggregationInterval.MINUTE, lastAggregated);
        cleanUnclosedStatus.setLastEventDate(oldestMinuteInterval.getStart().minusMinutes(1));
        eventAggregationManagementDao.updateEventAggregatorStatus(cleanUnclosedStatus);
    }

    final DateTime newestEventTime = DateTime.now().minus(this.aggregationDelay).secondOfMinute()
            .roundFloorCopy();

    final Thread currentThread = Thread.currentThread();
    final String currentName = currentThread.getName();
    final MutableInt events = new MutableInt();
    final MutableObject lastEventDate = new MutableObject(newestEventTime);

    boolean complete;
    try {
        currentThread.setName(currentName + "-" + lastAggregated + "_" + newestEventTime);

        logger.debug("Starting aggregation of events between {} (inc) and {} (exc)", lastAggregated,
                newestEventTime);

        //Do aggregation, capturing the start and end dates
        eventAggregatorStatus.setLastStart(DateTime.now());
        complete = portalEventDao.aggregatePortalEvents(lastAggregated, newestEventTime,
                this.eventAggregationBatchSize,
                new AggregateEventsHandler(events, lastEventDate, eventAggregatorStatus));
        eventAggregatorStatus.setLastEventDate((DateTime) lastEventDate.getValue());
        eventAggregatorStatus.setLastEnd(DateTime.now());
    } finally {
        currentThread.setName(currentName);
    }

    //Store the results of the aggregation
    eventAggregationManagementDao.updateEventAggregatorStatus(eventAggregatorStatus);

    complete = complete
            && (this.eventAggregationBatchSize <= 0 || events.intValue() < this.eventAggregationBatchSize);
    return new EventProcessingResult(events.intValue(), lastAggregated,
            eventAggregatorStatus.getLastEventDate(), complete);
}
From source file:org.jasig.portal.events.aggr.PortalRawEventsAggregatorImpl.java
@Override
@AggrEventsTransactional
public EventProcessingResult doCloseAggregations() {
    if (!this.clusterLockService.isLockOwner(AGGREGATION_LOCK_NAME)) {
        throw new IllegalStateException("The cluster lock " + AGGREGATION_LOCK_NAME
                + " must be owned by the current thread and server");
    }

    final IEventAggregatorStatus cleanUnclosedStatus = eventAggregationManagementDao
            .getEventAggregatorStatus(ProcessingType.CLEAN_UNCLOSED, true);

    //Update status with current server name
    final String serverName = this.portalInfoProvider.getUniqueServerName();
    cleanUnclosedStatus.setServerName(serverName);
    cleanUnclosedStatus.setLastStart(new DateTime());

    //Determine date of most recently aggregated data
    final IEventAggregatorStatus eventAggregatorStatus = eventAggregationManagementDao
            .getEventAggregatorStatus(ProcessingType.AGGREGATION, false);
    if (eventAggregatorStatus == null || eventAggregatorStatus.getLastEventDate() == null) {
        //Nothing has been aggregated, skip unclosed cleanup
        cleanUnclosedStatus.setLastEnd(new DateTime());
        eventAggregationManagementDao.updateEventAggregatorStatus(cleanUnclosedStatus);
        return new EventProcessingResult(0, null, null, true);
    }

    final DateTime lastAggregatedDate = eventAggregatorStatus.getLastEventDate();

    //If lastCleanUnclosedDate is null use the oldest date dimension as there can be
    //no aggregations that exist before it
    final DateTime lastCleanUnclosedDate;
    if (cleanUnclosedStatus.getLastEventDate() == null) {
        final DateDimension oldestDateDimension = this.dateDimensionDao.getOldestDateDimension();
        lastCleanUnclosedDate = oldestDateDimension.getDate().toDateTime();
    } else {
        lastCleanUnclosedDate = cleanUnclosedStatus.getLastEventDate();
    }

    if (!(lastCleanUnclosedDate.isBefore(lastAggregatedDate))) {
        logger.debug("No events aggregated since last unclosed aggregation cleaning, skipping clean: {}",
                lastAggregatedDate);
        return new EventProcessingResult(0, lastCleanUnclosedDate, lastAggregatedDate, true);
    }

    //Switch to flush on commit to avoid flushes during queries
    final EntityManager entityManager = this.getEntityManager();
    entityManager.flush();
    entityManager.setFlushMode(FlushModeType.COMMIT);

    //Track the number of closed aggregations and the last date of a cleaned interval
    int closedAggregations = 0;
    int cleanedIntervals = 0;
    DateTime cleanUnclosedEnd;

    final Thread currentThread = Thread.currentThread();
    final String currentName = currentThread.getName();
    try {
        currentThread.setName(currentName + "-" + lastCleanUnclosedDate + "-" + lastAggregatedDate);

        //Local caches used to reduce db io
        final IntervalsForAggregatorHelper intervalsForAggregatorHelper = new IntervalsForAggregatorHelper();
        final Map<AggregationInterval, AggregationIntervalInfo> previousIntervals = new HashMap<AggregationInterval, AggregationIntervalInfo>();

        //A DateTime within the next interval to close aggregations in
        DateTime nextIntervalDate = lastCleanUnclosedDate;
        do {
            //Reset our goal of catching up to the last aggregated event on every iteration
            cleanUnclosedEnd = lastAggregatedDate;

            //For each interval the aggregator supports, cleanup the unclosed aggregations
            for (final AggregationInterval interval : intervalsForAggregatorHelper.getHandledIntervals()) {
                final AggregationIntervalInfo previousInterval = previousIntervals.get(interval);
                if (previousInterval != null && nextIntervalDate.isBefore(previousInterval.getEnd())) {
                    logger.debug(
                            "{} interval before {} has already been cleaned during this execution, ignoring",
                            interval, previousInterval.getEnd());
                    continue;
                }

                //The END date of the last clean session will find us the next interval to clean
                final AggregationIntervalInfo nextIntervalToClean = intervalHelper.getIntervalInfo(interval,
                        nextIntervalDate);
                previousIntervals.put(interval, nextIntervalToClean);
                if (nextIntervalToClean == null) {
                    continue;
                }

                final DateTime start = nextIntervalToClean.getStart();
                final DateTime end = nextIntervalToClean.getEnd();
                if (!end.isBefore(lastAggregatedDate)) {
                    logger.debug("{} interval between {} and {} is still active, ignoring",
                            new Object[] { interval, start, end });
                    continue;
                }

                //Track the oldest interval end, this ensures that nothing is missed
                if (end.isBefore(cleanUnclosedEnd)) {
                    cleanUnclosedEnd = end;
                }

                logger.debug("Cleaning unclosed {} aggregations between {} and {}",
                        new Object[] { interval, start, end });
                for (final IPortalEventAggregator<PortalEvent> portalEventAggregator : portalEventAggregators) {
                    checkShutdown();

                    final Class<? extends IPortalEventAggregator<?>> aggregatorType = getClass(portalEventAggregator);

                    //Get aggregator specific interval info config
                    final AggregatedIntervalConfig aggregatorIntervalConfig = intervalsForAggregatorHelper
                            .getAggregatorIntervalConfig(aggregatorType);

                    //If the aggregator is being used for the specified interval call cleanUnclosedAggregations
                    if (aggregatorIntervalConfig.isIncluded(interval)) {
                        closedAggregations += portalEventAggregator.cleanUnclosedAggregations(start, end, interval);
                    }
                }
                cleanedIntervals++;
            }

            //Set the next interval to the end date from the last aggregation run
            nextIntervalDate = cleanUnclosedEnd;

            logger.debug("Closed {} aggregations across {} interval before {} with goal of {}",
                    new Object[] { closedAggregations, cleanedIntervals, cleanUnclosedEnd, lastAggregatedDate });
            //Loop until either the batchSize of cleaned aggregations has been reached or no aggregation work is done
        } while (closedAggregations <= cleanUnclosedAggregationsBatchSize
                && cleanedIntervals <= cleanUnclosedIntervalsBatchSize
                && cleanUnclosedEnd.isBefore(lastAggregatedDate));
    } finally {
        currentThread.setName(currentName);
    }

    //Update the status object and store it
    cleanUnclosedStatus.setLastEventDate(cleanUnclosedEnd);
    cleanUnclosedStatus.setLastEnd(new DateTime());
    eventAggregationManagementDao.updateEventAggregatorStatus(cleanUnclosedStatus);

    return new EventProcessingResult(closedAggregations, lastCleanUnclosedDate, lastAggregatedDate,
            !cleanUnclosedEnd.isBefore(lastAggregatedDate));
}
From source file:org.jasig.portal.events.aggr.PortalRawEventsAggregatorImpl.java
private EventProcessingResult doAggregateRawEventsInternal() {
    if (!this.clusterLockService.isLockOwner(AGGREGATION_LOCK_NAME)) {
        throw new IllegalStateException("The cluster lock " + AGGREGATION_LOCK_NAME
                + " must be owned by the current thread and server");
    }

    if (!this.portalEventDimensionPopulator.isCheckedDimensions()) {
        //First time aggregation has happened, run populateDimensions to ensure enough dimension data exists
        final boolean populatedDimensions = this.portalEventAggregationManager.populateDimensions();
        if (!populatedDimensions) {
            this.logger.warn(
                    "Aborting raw event aggregation, populateDimensions returned false so the state of date/time dimensions is unknown");
            return null;
        }
    }

    //Flush any dimension creation before aggregation
    final EntityManager entityManager = this.getEntityManager();
    entityManager.flush();
    entityManager.setFlushMode(FlushModeType.COMMIT);

    final IEventAggregatorStatus eventAggregatorStatus = eventAggregationManagementDao
            .getEventAggregatorStatus(ProcessingType.AGGREGATION, true);

    //Update status with current server name
    final String serverName = this.portalInfoProvider.getUniqueServerName();
    final String previousServerName = eventAggregatorStatus.getServerName();
    if (previousServerName != null && !serverName.equals(previousServerName)) {
        this.logger.debug("Last aggregation run on {} clearing all aggregation caches", previousServerName);
        final Session session = getEntityManager().unwrap(Session.class);
        final Cache cache = session.getSessionFactory().getCache();
        cache.evictEntityRegions();
    }

    eventAggregatorStatus.setServerName(serverName);

    //Calculate date range for aggregation
    DateTime lastAggregated = eventAggregatorStatus.getLastEventDate();
    if (lastAggregated == null) {
        lastAggregated = portalEventDao.getOldestPortalEventTimestamp();

        //No portal events to aggregate, skip aggregation
        if (lastAggregated == null) {
            return new EventProcessingResult(0, null, null, true);
        }

        //First time aggregation has run, initialize the CLEAN_UNCLOSED status to save catch-up time
        final IEventAggregatorStatus cleanUnclosedStatus = eventAggregationManagementDao
                .getEventAggregatorStatus(ProcessingType.CLEAN_UNCLOSED, true);
        AggregationIntervalInfo oldestMinuteInterval = this.intervalHelper
                .getIntervalInfo(AggregationInterval.MINUTE, lastAggregated);
        cleanUnclosedStatus.setLastEventDate(oldestMinuteInterval.getStart().minusMinutes(1));
        eventAggregationManagementDao.updateEventAggregatorStatus(cleanUnclosedStatus);
    }

    final DateTime newestEventTime = DateTime.now().minus(this.aggregationDelay).secondOfMinute()
            .roundFloorCopy();

    final Thread currentThread = Thread.currentThread();
    final String currentName = currentThread.getName();
    final MutableInt events = new MutableInt();
    final MutableObject lastEventDate = new MutableObject(newestEventTime);

    boolean complete;
    try {
        currentThread.setName(currentName + "-" + lastAggregated + "_" + newestEventTime);

        logger.debug("Starting aggregation of events between {} (inc) and {} (exc)", lastAggregated,
                newestEventTime);

        //Do aggregation, capturing the start and end dates
        eventAggregatorStatus.setLastStart(DateTime.now());
        complete = portalEventDao.aggregatePortalEvents(lastAggregated, newestEventTime,
                this.eventAggregationBatchSize,
                new AggregateEventsHandler(events, lastEventDate, eventAggregatorStatus));
        eventAggregatorStatus.setLastEventDate((DateTime) lastEventDate.getValue());
        eventAggregatorStatus.setLastEnd(DateTime.now());
    } finally {
        currentThread.setName(currentName);
    }

    //Store the results of the aggregation
    eventAggregationManagementDao.updateEventAggregatorStatus(eventAggregatorStatus);

    complete = complete
            && (this.eventAggregationBatchSize <= 0 || events.intValue() < this.eventAggregationBatchSize);
    return new EventProcessingResult(events.intValue(), lastAggregated,
            eventAggregatorStatus.getLastEventDate(), complete);
}