List of usage examples for org.hibernate.ScrollableResults.get()
Object[] get();
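Before the real-world examples below, here is a minimal sketch of the pattern they all share: open a cursor, advance with next(), and read the current row with get(). The entity and query are hypothetical. In the classic Hibernate API these examples use (3.x–5.x), get() returns the whole current row as an Object[] and get(int) reads a single column; Hibernate 6 changed get() to return a single object.

import org.hibernate.ScrollMode;
import org.hibernate.ScrollableResults;
import org.hibernate.Session;

// ...
ScrollableResults cursor = session
        .createQuery("select u.name, u.email from User u") // hypothetical entity/query
        .setReadOnly(true)
        .scroll(ScrollMode.FORWARD_ONLY);
try {
    while (cursor.next()) {
        Object[] row = cursor.get();            // the full current row as an Object[]
        String name = (String) row[0];
        String email = (String) cursor.get(1);  // single columns can also be read via get(int)
        // process name/email here ...
    }
} finally {
    cursor.close(); // always release the underlying JDBC cursor
}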
From source file:jp.go.nict.langrid.dao.hibernate.HibernateOverUseStateDao.java
License:Open Source License
private OverUseState getNextOverUseState(ScrollableResults iLog) {
    if (!iLog.next()) {
        return null;
    }
    Object[] row = iLog.get(); // full current row; individual columns are read positionally below
    LimitType t = (LimitType) iLog.get(6);
    long currentValue = 0;
    if (t.equals(LimitType.FREQUENCY)) {
        currentValue = ((Number) iLog.get(8)).longValue();
    } else {
        currentValue = ((Number) iLog.get(9)).longValue();
    }
    return new OverUseState((String) iLog.get(0), (String) iLog.get(1), (String) iLog.get(2),
            (String) iLog.get(3), (Calendar) iLog.get(4), (Period) iLog.get(5), t,
            (Integer) iLog.get(7), currentValue, (Calendar) iLog.get(10));
}
From source file:monasca.thresh.infrastructure.persistence.hibernate.AlarmDefinitionSqlImpl.java
License:Apache License
@SuppressWarnings("unchecked")
private List<SubExpression> findSubExpressions(final Session session, final String alarmDefId) {
    final List<SubExpression> subExpressions = Lists.newArrayList();
    Map<String, Map<String, String>> dimensionMap = Maps.newHashMap();

    final DetachedCriteria subAlarmDefinitionCriteria = DetachedCriteria
            .forClass(SubAlarmDefinitionDb.class, "sad").createAlias("alarmDefinition", "ad")
            .add(Restrictions.conjunction(Restrictions.eqProperty("sad.alarmDefinition.id", "ad.id"),
                    Restrictions.eq("sad.alarmDefinition.id", alarmDefId)))
            .addOrder(Order.asc("sad.id")).setProjection(Projections.property("sad.id"));

    final ScrollableResults subAlarmDefinitionDimensionResult = session
            .createCriteria(SubAlarmDefinitionDimensionDb.class)
            .add(Property.forName("subAlarmDefinitionDimensionId.subExpression.id")
                    .in(subAlarmDefinitionCriteria))
            .setReadOnly(true).scroll(ScrollMode.FORWARD_ONLY);

    final ScrollableResults subAlarmDefinitionResult = session
            .getNamedQuery(SubAlarmDefinitionDb.Queries.BY_ALARMDEFINITION_ID).setString("id", alarmDefId)
            .setReadOnly(true).scroll(ScrollMode.FORWARD_ONLY);

    while (subAlarmDefinitionDimensionResult.next()) {
        final SubAlarmDefinitionDimensionDb dim =
                (SubAlarmDefinitionDimensionDb) subAlarmDefinitionDimensionResult.get()[0];
        final SubAlarmDefinitionDimensionId id = dim.getSubAlarmDefinitionDimensionId();
        final String subAlarmId = (String) session.getIdentifier(id.getSubExpression());
        final String name = id.getDimensionName();
        final String value = dim.getValue();

        if (!dimensionMap.containsKey(subAlarmId)) {
            dimensionMap.put(subAlarmId, Maps.<String, String>newTreeMap());
        }
        dimensionMap.get(subAlarmId).put(name, value);

        session.evict(dim);
    }

    while (subAlarmDefinitionResult.next()) {
        final SubAlarmDefinitionDb def = (SubAlarmDefinitionDb) subAlarmDefinitionResult.get()[0];

        final String id = def.getId();
        final AggregateFunction function = AggregateFunction.fromJson(def.getFunction());
        final String metricName = def.getMetricName();
        final AlarmOperator operator = AlarmOperator.fromJson(def.getOperator());
        final Double threshold = def.getThreshold();
        final Integer period = def.getPeriod();
        final Integer periods = def.getPeriods();
        final Boolean deterministic = def.isDeterministic();

        Map<String, String> dimensions = dimensionMap.get(id);
        if (dimensions == null) {
            dimensions = Collections.emptyMap();
        }

        subExpressions.add(new SubExpression(id, new AlarmSubExpression(function,
                new MetricDefinition(metricName, dimensions), operator, threshold, period, periods,
                deterministic)));

        session.evict(def);
    }

    subAlarmDefinitionDimensionResult.close();
    subAlarmDefinitionResult.close();

    return subExpressions;
}
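The per-row session.evict(...) calls above are the fine-grained alternative to a wholesale session.clear(): each entity is detached as soon as its data has been copied into plain collections, keeping the persistence context small over a long scroll. A distilled sketch of that idiom, reusing the example's entity name with ambient session/query placeholders:

ScrollableResults rows = session.createCriteria(SubAlarmDefinitionDimensionDb.class)
        .setReadOnly(true)
        .scroll(ScrollMode.FORWARD_ONLY);
while (rows.next()) {
    Object entity = rows.get()[0];
    // ... copy what you need into plain data structures ...
    session.evict(entity); // detach immediately so the first-level cache never grows
}
rows.close();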
From source file:net.sf.oreka.services.RecSegmentServiceHbn.java
License:Open Source License
public int getResults(RecSegmentFilter filter, int offset, int number, String orderBy, boolean ascending,
        List results) {
    firstCriterium = true;
    int numResults = 0;
    logger.log(Level.DEBUG, "Entering getResults");

    Transaction tx = null;
    Session session = null;
    try {
        session = OrkWeb.hibernateManager.getSession();
        StringBuffer queryString = new StringBuffer(
                "from OrkSegment as seg left join seg.tape as tape left join tape.service as srv ");

        if (filter.getStartDate() != null && filter.getEndDate() != null)
            queryString.append(" where seg.timestamp between :startDate and :endDate ");
        else if (filter.getStartDate() != null)
            queryString.append(" where seg.timestamp > :startDate ");
        else if (filter.getEndDate() != null)
            queryString.append(" where seg.timestamp < :endDate ");

        if (filter.getLocalParty().length() > 0) {
            queryString.append(" and seg.localParty=:localParty ");
        }
        if (filter.getRemoteParty().length() > 0) {
            queryString.append(" and seg.remoteParty=:remoteParty ");
        }
        if (filter.getMinDuration().length() > 0) {
            queryString.append(" and seg.duration>:minDuration ");
        }
        if (filter.getMaxDuration().length() > 0) {
            queryString.append(" and seg.duration<:maxDuration ");
        }
        if (filter.getDirection() != Direction.ALL) {
            queryString.append(" and seg.direction=:direction ");
        }

        if (orderBy.length() == 0) {
            orderBy = "seg.timestamp";
        }
        queryString.append(" order by ");
        queryString.append(orderBy);
        queryString.append(ascending ? " asc" : " desc");

        Query query = session.createQuery(queryString.toString());
        SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
        if (filter.getStartDate() != null) {
            logger.debug("Filter start date:" + dateFormat.format(filter.getStartDate()));
            query.setTimestamp("startDate", filter.getStartDate());
        }
        if (filter.getEndDate() != null) {
            logger.debug("Filter end date:" + dateFormat.format(filter.getEndDate()));
            query.setTimestamp("endDate", filter.getEndDate());
        }
        if (filter.getLocalParty().length() > 0) {
            query.setString("localParty", filter.getLocalParty());
        }
        if (filter.getRemoteParty().length() > 0) {
            query.setString("remoteParty", filter.getRemoteParty());
        }
        if (filter.getMinDuration().length() > 0) {
            query.setString("minDuration", filter.getMinDuration());
        }
        if (filter.getMaxDuration().length() > 0) {
            query.setString("maxDuration", filter.getMaxDuration());
        }
        if (filter.getDirection() != Direction.ALL) {
            query.setParameter("direction", filter.getDirection().ordinal());
        }

        ScrollableResults scrollDocs = query.scroll();

        // Jump to the last row to learn the total count (getRowNumber() is zero-based).
        if (scrollDocs.last()) {
            numResults = scrollDocs.getRowNumber() + 1;
            logger.debug("Num res:" + numResults);
        }

        // Reposition on the requested page and read up to <number> rows.
        scrollDocs.setRowNumber(offset);
        int rowsSoFar = 0;
        while (scrollDocs.get() != null && rowsSoFar < number) {
            rowsSoFar++;
            OrkSegment seg = (OrkSegment) scrollDocs.get(0);
            RecSegmentResult res = new RecSegmentResult();
            res.setRecSegment(seg);
            results.add(res);
            scrollDocs.next();
        }
    } catch (HibernateException he) {
        if (tx != null)
            tx.rollback();
        logger.log(Level.ERROR, he.toString());
        he.printStackTrace();
    } catch (Exception e) {
        logger.error(e);
        e.printStackTrace();
    } finally {
        session.close();
    }
    return numResults;
}
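The paging idiom buried in that method is worth isolating: last() jumps to the final row, getRowNumber() (zero-based) yields the total count, and setRowNumber(offset) repositions the cursor at the start of the requested page. A minimal sketch of just that idiom, with hypothetical query and paging variables; note it needs a scrollable cursor — the default scroll() mode — rather than ScrollMode.FORWARD_ONLY:

ScrollableResults scroll = session.createQuery("from OrkSegment").scroll(); // hypothetical query
int total = 0;
if (scroll.last()) {
    total = scroll.getRowNumber() + 1; // getRowNumber() is zero-based
}
scroll.setRowNumber(offset); // jump directly to the first row of the page
int fetched = 0;
while (scroll.get() != null && fetched < pageSize) { // mirrors the example's end-of-data check
    Object row = scroll.get(0);
    // collect row ...
    fetched++;
    scroll.next();
}
scroll.close();

When only one page is actually needed, database-side paging via Query.setFirstResult()/setMaxResults() is usually cheaper than scrolling to last() just to obtain a count.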
From source file:org.candlepin.gutterball.curator.ComplianceSnapshotCurator.java
License:Open Source License
/**
 * Retrieves the compliance status counts over the given time span with the specified criteria.
 * The counts are returned in a map of maps, with the outer map mapping the dates to the inner
 * map which maps the statuses to their respective counts.
 * <p></p>
 * If the start and/or end dates are null, the time span will be similarly unrestricted. Note
 * that the time within a given Date object is ignored. If neither the start nor end dates are
 * provided, all known compliance status data will be used.
 *
 * @param startDate
 *  The date at which the time span should begin. If null, all compliance statuses before the
 *  end date (if provided) will be used.
 *
 * @param endDate
 *  The date at which the time span should end. If null, all compliance statuses after the
 *  start date (if provided) will be used.
 *
 * @param ownerKey
 *  An owner key to use to filter compliance status counts. If provided, only consumers
 *  associated with the specified owner key/account will be counted.
 *
 * @param consumerUuids
 *  A list of consumer UUIDs to use to filter compliance status counts. If provided, only the
 *  specified consumers will be counted.
 *
 * @param sku
 *  A subscription sku to use to filter compliance status counts. If provided, only consumers
 *  using the specified sku will be counted.
 *
 * @param subscriptionName
 *  A subscription name to use to filter compliance status counts. If provided, only consumers
 *  using subscriptions with the specified product name will be counted.
 *
 * @param productName
 *  A product name to use to filter compliance status counts. If provided, only consumers with
 *  an installed product with the specified product name will be counted.
 *
 * @param attributes
 *  A map of entitlement attributes to use to filter compliance status counts. If provided, only
 *  consumers with entitlements having the specified values for the given attributes will be
 *  counted.
 *
 * @param pageRequest
 *  A PageRequest instance containing paging information from the request. If null, no paging
 *  will be performed.
 *
 * @return
 *  A page containing a map of maps containing the compliance status counts, grouped by day. If
 *  no counts were found for the given time span, the page will contain an empty map.
 */
public Page<Map<Date, Map<String, Integer>>> getComplianceStatusCounts(Date startDate, Date endDate,
        String ownerKey, List<String> consumerUuids, String sku, String subscriptionName, String productName,
        Map<String, String> attributes, PageRequest pageRequest) {

    Page<Map<Date, Map<String, Integer>>> page = new Page<Map<Date, Map<String, Integer>>>();
    page.setPageRequest(pageRequest);

    // Build our query...
    // Impl note: This query's results MUST be sorted by date in ascending order. If it's not,
    // the algorithm below breaks.
    Query query = this.buildComplianceStatusCountQuery(this.currentSession(), startDate, endDate, ownerKey,
            consumerUuids, sku, subscriptionName, productName, attributes);

    // Clamp our dates so they're no further out than "today."
    Date today = new Date();
    if (startDate != null && startDate.after(today)) {
        startDate = today;
    }
    if (endDate != null && endDate.after(today)) {
        endDate = today;
    }

    // Execute & process results...
    Map<Date, Map<String, Integer>> resultmap = new TreeMap<Date, Map<String, Integer>>();
    Map<String, Object[]> cstatusmap = new HashMap<String, Object[]>();

    // Step through our data and do our manual aggregation bits...
    ScrollableResults results = query.scroll(ScrollMode.FORWARD_ONLY);

    if (results.next()) {
        Calendar date = Calendar.getInstance();

        Object[] row = results.get();
        String uuid = (String) row[0];
        row[1] = ((String) row[1]).toLowerCase();
        date.setTime((Date) row[2]);

        // Prime the calendars here...
        Calendar cdate = Calendar.getInstance();
        cdate.setTime(startDate != null ? startDate : date.getTime());
        cdate.set(Calendar.HOUR_OF_DAY, 23);
        cdate.set(Calendar.MINUTE, 59);
        cdate.set(Calendar.SECOND, 59);
        cdate.set(Calendar.MILLISECOND, 999);

        Calendar end = Calendar.getInstance();
        end.setTimeInMillis(endDate != null ? endDate.getTime() : Long.MAX_VALUE);

        for (; this.compareCalendarsByDate(cdate, end) <= 0; cdate.add(Calendar.DATE, 1)) {
            while (this.compareCalendarsByDate(date, cdate) <= 0) {
                // Date is before our current date. Store the uuid's status so we can add it to
                // our counts later.
                cstatusmap.put(uuid, row);

                if (!results.next()) {
                    if (endDate == null) {
                        end.setTimeInMillis(cdate.getTimeInMillis());
                    }
                    break;
                }

                row = results.get();
                uuid = (String) row[0];
                row[1] = ((String) row[1]).toLowerCase();
                date.setTime((Date) row[2]);
            }

            Date hashdate = cdate.getTime();
            Map<String, Integer> statusmap = new HashMap<String, Integer>();

            // Go through and add up all our counts for the day.
            for (Object[] cstatus : cstatusmap.values()) {
                if (cstatus[3] == null || this.compareDatesByDate(hashdate, (Date) cstatus[3]) < 0) {
                    Integer count = statusmap.get((String) cstatus[1]);
                    statusmap.put((String) cstatus[1], (count != null ? count + 1 : 1));
                }
            }

            resultmap.put(hashdate, statusmap);
        }
    }

    results.close();

    // Pagination
    // This is horribly inefficient, but the only way to do it with the current implementation.
    if (pageRequest != null && pageRequest.isPaging()) {
        page.setMaxRecords(resultmap.size());

        int offset = (pageRequest.getPage() - 1) * pageRequest.getPerPage();
        int nextpage = offset + pageRequest.getPerPage();

        // Trim results. :(
        Iterator<Date> iterator = resultmap.keySet().iterator();
        for (int pos = 0; iterator.hasNext(); ++pos) {
            iterator.next();
            if (pos < offset || pos >= nextpage) {
                iterator.remove();
            }
        }
    }

    page.setPageData(resultmap);
    return page;
}
From source file:org.candlepin.model.DetachedCandlepinQuery.java
License:Open Source License
/**
 * {@inheritDoc}
 */
@Override
@Transactional
public int forEachRow(ResultProcessor<Object[]> processor) {
    if (processor == null) {
        throw new IllegalArgumentException("processor is null");
    }

    Criteria executable = this.getExecutableCriteria();
    ScrollableResults cursor = executable.scroll(ScrollMode.FORWARD_ONLY);
    int count = 0;

    try {
        boolean cont = true;
        while (cont && cursor.next()) {
            cont = processor.process(cursor.get());
            ++count;
        }
    } finally {
        cursor.close();
    }

    return count;
}
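A hypothetical call site for the method above. ResultProcessor is Candlepin's own callback interface; its shape — a single boolean process(T) method where returning false stops the scroll — is inferred from the loop, and candlepinQuery is a placeholder for a DetachedCandlepinQuery instance:

int processed = candlepinQuery.forEachRow(new ResultProcessor<Object[]>() {
    @Override
    public boolean process(Object[] row) {
        // handle one row without materializing the whole result set
        return true; // return false to stop scrolling early
    }
});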
From source file:org.drools.reteoo.JoinNode.java
License:Apache License
/**
 * Performs a query against the db. Operates on {@link RightRelationship} with
 * {@link ScrollableResults}, which allows processing a single fetched row at a time
 * instead of the full query result. Since this is a read-only operation, no flushing
 * is needed; the session is only cleared periodically to bound memory use.
 *
 * @param contextEntry
 * @param useLeftMemory
 * @param context
 * @param workingMemory
 * @param leftTuple
 */
private void getAndPropagateDBTupleFromLeft(ContextEntry[] contextEntry, boolean useLeftMemory,
        PropagationContext context, InternalWorkingMemory workingMemory, LeftTuple leftTuple) {
    int counterToClear = 0;
    m_relManager = DbRelationshipManager.getInstance();
    final Session session = m_relManager.openSession();
    final Query query = m_relManager.createQueryGetRightRelsByJoinNodeId(this.getId());
    final ScrollableResults iterator = m_relManager.getScrollableResultsIterator(query);

    while (iterator.next()) {
        final RightRelationship relationship = (RightRelationship) iterator.get()[0];
        final RightTuple rightTupleFromDb = createRightTuple(relationship, this);
        if ((++counterToClear % DbRelationshipManager.BATCH_SIZE) == 0) {
            session.clear();
        }
        propagateFromLeft(rightTupleFromDb, leftTuple, contextEntry, useLeftMemory, context, workingMemory);
    }

    iterator.close();
    session.close();
}
From source file:org.drools.reteoo.JoinNode.java
License:Apache License
/**
 * Performs a query against the db. Operates on {@link Relationship} with
 * {@link ScrollableResults}, which allows processing a single fetched row at a time
 * instead of the full query result. Since this is a read-only operation, no flushing
 * is needed; the session is only cleared periodically to bound memory use.
 *
 * @param context
 *            - for propagating tuple.
 * @param workingMemory
 *            - for propagating tuple.
 * @param memory
 *            - for propagating tuple.
 * @param rightTuple
 *            - the tuple to propagate.
 */
private void getAndPropagateDBTupleFromRight(final PropagationContext context,
        final InternalWorkingMemory workingMemory, final BetaMemory memory, final RightTuple rightTuple) {
    int counterToClear = 0;
    m_relManager = DbRelationshipManager.getInstance();
    final Session session = m_relManager.openSession();
    final Query query = m_relManager.createQueryGetRelsByJoinNodeId(this.getId());
    final ScrollableResults iterator = m_relManager.getScrollableResultsIterator(query);

    while (iterator.next()) {
        final Relationship relationship = (Relationship) iterator.get()[0];
        final LeftTuple tupleFromDb = createLeftTuple(relationship, this);
        if ((++counterToClear % DbRelationshipManager.BATCH_SIZE) == 0) {
            session.clear();
        }
        propagateFromRight(rightTuple, tupleFromDb, memory, context, workingMemory);
    }

    iterator.close();
    session.close();
}
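Both JoinNode variants above share one memory-control idiom: scroll forward and clear() the session every BATCH_SIZE rows so entities accumulated in the first-level cache are released. A minimal sketch of just that idiom, with ambient session/query placeholders and a literal batch size standing in for DbRelationshipManager.BATCH_SIZE:

final int BATCH_SIZE = 100; // placeholder for DbRelationshipManager.BATCH_SIZE
ScrollableResults it = query.scroll(ScrollMode.FORWARD_ONLY);
try {
    int count = 0;
    while (it.next()) {
        Relationship rel = (Relationship) it.get()[0];
        // ... build and propagate a tuple from rel ...
        if (++count % BATCH_SIZE == 0) {
            session.clear(); // read-only traversal, so dropping the persistence context is safe
        }
    }
} finally {
    it.close();
}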
From source file:org.eclipse.emf.cdo.server.internal.hibernate.HibernateQueryHandler.java
License:Open Source License
/**
 * Executes hql queries. Gets the session from the {@link HibernateStoreAccessor}, creates a hibernate query
 * and sets the parameters taken from the {@link CDOQueryInfo#getParameters()}. Takes into account the
 * {@link CDOQueryInfo#getMaxResults()} and the {@link IHibernateStore#FIRST_RESULT} values for paging.
 *
 * @param info
 *          the object containing the query and parameters
 * @param context
 *          the query results are placed in the context
 * @see IQueryHandler#executeQuery(CDOQueryInfo, IQueryContext)
 */
public void executeQuery(CDOQueryInfo info, IQueryContext context) {
    // get a transaction, the hibernateStoreAccessor is placed in a threadlocal
    // so all db access uses the same session.
    final Session session = hibernateStoreAccessor.getHibernateSession();
    try {
        // create the query
        final Query query = session.createQuery(info.getQueryString());
        query.setReadOnly(true);

        // get the parameters with some parameter conversion
        int firstResult = -1;
        boolean cacheResults = true;
        for (String key : info.getParameters().keySet()) {
            if (key.compareToIgnoreCase(IHibernateStore.CACHE_RESULTS) == 0) {
                try {
                    cacheResults = (Boolean) info.getParameters().get(key);
                } catch (ClassCastException e) {
                    throw new IllegalArgumentException("Parameter " + IHibernateStore.CACHE_RESULTS //$NON-NLS-1$
                            + " must be a boolean. errorMessage " + e.getMessage());
                }
            } else if (key.compareToIgnoreCase(IHibernateStore.FIRST_RESULT) == 0) {
                final Object o = info.getParameters().get(key);
                if (o != null) {
                    try {
                        firstResult = (Integer) o;
                    } catch (ClassCastException e) {
                        throw new IllegalArgumentException(
                                "Parameter firstResult must be an integer but it is a " + o //$NON-NLS-1$
                                        + " class " + o.getClass().getName()); //$NON-NLS-1$
                    }
                }
            } else {
                // in case the parameter is a CDOID get the object from the db
                final Object param = info.getParameters().get(key);
                if (param instanceof CDOID && HibernateUtil.getInstance().isStoreCreatedID((CDOID) param)) {
                    final CDOID id = (CDOID) param;
                    final String entityName = HibernateUtil.getInstance().getEntityName(id);
                    final Serializable idValue = HibernateUtil.getInstance().getIdValue(id);
                    final CDORevision revision = (CDORevision) session.get(entityName, idValue);
                    query.setEntity(key, revision);
                    if (cacheResults) {
                        addToRevisionCache(revision);
                    }
                } else {
                    query.setParameter(key, param);
                }
            }
        }

        // set the first result
        if (firstResult > -1) {
            query.setFirstResult(firstResult);
        }

        // the max result
        if (info.getMaxResults() != CDOQueryInfo.UNLIMITED_RESULTS) {
            query.setMaxResults(info.getMaxResults());
        }

        final ScrollableResults scroller = query.scroll(ScrollMode.FORWARD_ONLY);

        // and go for the query
        // future extension: support iterate, scroll through a parameter
        int i = 0;
        try {
            while (scroller.next()) {
                Object[] os = scroller.get();
                Object o;
                if (os.length == 1) {
                    o = handleAuditEntries(os[0]);
                } else {
                    o = handleAuditEntries(os);
                }

                final boolean addOneMore = context.addResult(o);
                if (cacheResults && o instanceof CDORevision) {
                    addToRevisionCache((CDORevision) o);
                }
                if (o instanceof InternalCDORevision) {
                    ((InternalCDORevision) o).freeze();
                }

                // clear the session every 1000 results or so
                if (i++ % 1000 == 0) {
                    session.clear();
                }

                if (!addOneMore) {
                    return;
                }
            }
        } finally {
            scroller.close();
        }
    } finally {
        session.close();
    }
}
From source file:org.eclipse.emf.cdo.server.internal.hibernate.HibernateStoreAccessor.java
License:Open Source License
public void queryXRefs(QueryXRefsContext context) {
    final Session session = getHibernateSession();

    for (CDOID targetCdoId : context.getTargetObjects().keySet()) {
        final CDORevision revision = HibernateUtil.getInstance().getCDORevision(targetCdoId);
        final EClass targetEClass = context.getTargetObjects().get(targetCdoId);

        if (!getStore().isMapped(targetEClass)) {
            continue;
        }

        final String targetEntityName = getStore().getEntityName(targetEClass);
        final Map<EClass, List<EReference>> sourceCandidates = context.getSourceCandidates();
        for (EClass sourceEClass : sourceCandidates.keySet()) {
            if (!getStore().isMapped(sourceEClass)) {
                continue;
            }

            final String sourceEntityName = getStore().getEntityName(sourceEClass);
            for (EReference eref : sourceCandidates.get(sourceEClass)) {
                // handle transient ereferences
                if (!isEReferenceMapped(session, sourceEntityName, eref)) {
                    continue;
                }

                final String hql;
                if (eref.isMany()) {
                    hql = "select ref from " + sourceEntityName + " as ref, " + targetEntityName
                            + " as refTo where refTo = :to and refTo in elements(ref." + eref.getName() + ")";
                } else {
                    hql = "select ref from " + sourceEntityName + " as ref where :to = ref." + eref.getName();
                }

                final Query qry = session.createQuery(hql);
                qry.setEntity("to", revision);

                ScrollableResults result = qry.scroll(ScrollMode.FORWARD_ONLY);
                while (result.next()) {
                    final InternalCDORevision sourceRevision = (InternalCDORevision) result.get()[0];
                    sourceRevision.freeze();
                    int sourceIndex = 0;
                    if (eref.isMany()) {
                        // note this takes performance for sure as the list is read,
                        // consider not supporting sourceIndex, or doing it differently
                        final WrappedHibernateList cdoList = (WrappedHibernateList) sourceRevision
                                .getList(eref);
                        sourceIndex = cdoList.getDelegate().indexOf(revision);
                    }

                    boolean more = context.addXRef(targetCdoId, sourceRevision.getID(), eref, sourceIndex);
                    if (!more) {
                        return;
                    }
                }
            }
        }
    }
}
From source file:org.gbif.portal.dao.DAOUtils.java
License:Open Source License
/**
 * Processes a set of scrollable results in batches, writing each row out via the supplied
 * ResultsOutputter. Assumes the cursor has already been positioned on the first row.
 *
 * @param resultsOutputter
 * @param session
 * @param sr
 * @param associationTraverser
 * @param batchSize
 * @throws IOException
 */
public static void processScrollableResults(final ResultsOutputter resultsOutputter, Session session,
        ScrollableResults sr, AssociationTraverser associationTraverser, int batchSize) throws IOException {
    //indicate end of resultset
    boolean eor = false;
    int batchNo = 0;
    do {
        if (logger.isDebugEnabled()) {
            logger.debug("Running batch: " + (batchNo++));
        }

        if (associationTraverser != null)
            associationTraverser.batchPreprocess(batchSize, sr, session);

        //process in batches
        for (int i = 0; i < batchSize && !eor; i++) {
            Object record = sr.get();
            Map beanMap = null;

            //assemble all required model objects for rendering a single row
            if (associationTraverser != null) {
                beanMap = associationTraverser.traverse(record, session);
            } else {
                beanMap = new HashMap<String, Object>();
                if (record != null && record instanceof Object[] && ((Object[]) record).length > 0) {
                    beanMap.put("record", ((Object[]) record)[0]);
                } else {
                    beanMap.put("record", record);
                }
            }

            //write out result
            resultsOutputter.write(beanMap);

            if (beanMap != null) {
                //evict from the session to keep memory footprint down
                for (Object recordElement : beanMap.entrySet()) {
                    session.evict(recordElement);
                }
                beanMap = null;
            }

            //check to see if this is the last element in resultset
            if (sr.isLast()) {
                eor = true;
            } else {
                sr.next();
            }
        }

        //post process
        if (associationTraverser != null)
            associationTraverser.batchPostprocess(batchSize, sr, session);

        //flush between batches - to remove objects from the session
        session.flush();
        session.clear();
    } while (!eor);
}
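Note that, unlike the other examples on this page, the loop above calls sr.get() before ever advancing the cursor, so the caller must position the cursor on the first row before handing it in, and an empty result set should be handled before calling. A hypothetical call site under those assumptions (the entity name and surrounding variables are made up):

ScrollableResults sr = session.createQuery("from OccurrenceRecord") // hypothetical entity
        .scroll(ScrollMode.FORWARD_ONLY);
try {
    if (sr.next()) { // position on the first row; skips the call entirely for empty results
        DAOUtils.processScrollableResults(resultsOutputter, session, sr, traverser, 1000);
    }
} finally {
    sr.close();
}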